/* mallocx.c */
/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/*
 * These are extra allocation routines which are likely to be less
 * frequently used than those in malloc.c.  They are separate in the
 * hope that the .o file will be excluded from statically linked
 * executables.  We should probably break this up further.
 */

#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */
GC_bool GC_alloc_reclaim_list();        /* in malloc.c */

/* Some externally visible but unadvertised variables to allow access to */
/* free lists from inlined allocators without including gc_priv.h        */
/* or introducing dependencies on internal data structure layouts.       */
ptr_t * GC_CONST GC_objfreelist_ptr = GC_objfreelist;
ptr_t * GC_CONST GC_aobjfreelist_ptr = GC_aobjfreelist;
ptr_t * GC_CONST GC_uobjfreelist_ptr = GC_uobjfreelist;
# ifdef ATOMIC_UNCOLLECTABLE
    ptr_t * GC_CONST GC_auobjfreelist_ptr = GC_auobjfreelist;
# endif
GC_PTR GC_generic_or_special_malloc(lb,knd)
word lb;
int knd;
{
    switch(knd) {
#       ifdef STUBBORN_ALLOC
        case STUBBORN:
            return(GC_malloc_stubborn((size_t)lb));
#       endif
        case PTRFREE:
            return(GC_malloc_atomic((size_t)lb));
        case NORMAL:
            return(GC_malloc((size_t)lb));
        case UNCOLLECTABLE:
            return(GC_malloc_uncollectable((size_t)lb));
#       ifdef ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
            return(GC_malloc_atomic_uncollectable((size_t)lb));
#       endif /* ATOMIC_UNCOLLECTABLE */
        default:
            return(GC_generic_malloc(lb,knd));
    }
}
/* Change the size of the block pointed to by p to contain at least    */
/* lb bytes.  The object may be (and quite likely will be) moved.      */
/* The kind (e.g. atomic) is the same as that of the old.              */
/* Shrinking of large blocks is not implemented well.                  */
# ifdef __STDC__
    GC_PTR GC_realloc(GC_PTR p, size_t lb)
# else
    GC_PTR GC_realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    register struct hblk * h;
    register hdr * hhdr;
    register word sz;           /* Current size in bytes        */
    register word orig_sz;      /* Original sz in bytes         */
    int obj_kind;

    if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    sz = hhdr -> hb_sz;
    obj_kind = hhdr -> hb_obj_kind;
    sz = WORDS_TO_BYTES(sz);
    orig_sz = sz;

    if (sz > MAXOBJBYTES) {
        /* Round it up to the next whole heap block */
        register word descr;

        sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
        hhdr -> hb_sz = BYTES_TO_WORDS(sz);
        descr = GC_obj_kinds[obj_kind].ok_descriptor;
        if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
        hhdr -> hb_descr = descr;
        if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
        /* Extra area is already cleared by GC_alloc_large_and_clear. */
    }
    if (ADD_SLOP(lb) <= sz) {
        if (lb >= (sz >> 1)) {
#           ifdef STUBBORN_ALLOC
              if (obj_kind == STUBBORN) GC_change_stubborn(p);
#           endif
            if (orig_sz > lb) {
                /* Clear unneeded part of object to avoid bogus pointer */
                /* tracing.                                             */
                /* Safe for stubborn objects.                           */
                BZERO(((ptr_t)p) + lb, orig_sz - lb);
            }
            return(p);
        } else {
            /* shrink */
            GC_PTR result =
                GC_generic_or_special_malloc((word)lb, obj_kind);

            if (result == 0) return(0);
                /* Could also return original object.  But this        */
                /* gives the client warning of imminent disaster.      */
            BCOPY(p, result, lb);
#           ifndef IGNORE_FREE
              GC_free(p);
#           endif
            return(result);
        }
    } else {
        /* grow */
        GC_PTR result =
            GC_generic_or_special_malloc((word)lb, obj_kind);

        if (result == 0) return(0);
        BCOPY(p, result, sz);
#       ifndef IGNORE_FREE
          GC_free(p);
#       endif
        return(result);
    }
}
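/*
 * Illustrative usage sketch (not part of the collector; buf and new_len are
 * hypothetical client names):  as with ANSI realloc, the returned pointer
 * must replace the old one, since the object may have moved, and
 * GC_realloc(0, lb) behaves like GC_malloc(lb):
 *
 *      buf = GC_realloc(buf, new_len);
 *      if (buf == 0) { ... handle allocation failure ... }
 */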
# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
#   define REDIRECT_REALLOC GC_realloc
# endif

# ifdef REDIRECT_REALLOC

/* As with malloc, avoid two levels of extra calls here.       */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_realloc_replacement(p, lb) \
        GC_debug_realloc(p, lb, RA "unknown", 0)

# ifdef __STDC__
    GC_PTR realloc(GC_PTR p, size_t lb)
# else
    GC_PTR realloc(p,lb)
    GC_PTR p;
    size_t lb;
# endif
{
    return(REDIRECT_REALLOC(p, lb));
}

# undef GC_debug_realloc_replacement
# endif /* REDIRECT_REALLOC */
/* Allocate memory such that only pointers to near the         */
/* beginning of the object are considered.                     */
/* We avoid holding allocation lock while we clear memory.     */
ptr_t GC_generic_malloc_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register ptr_t result;
    word lw;
    word n_blocks;
    GC_bool init;
    DCL_LOCK_STATE;

    if (SMALL_OBJ(lb))
        return(GC_generic_malloc((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    init = GC_obj_kinds[k].ok_init;
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    result = (ptr_t)GC_alloc_large(lw, k, IGNORE_OFF_PAGE);
    if (0 != result) {
        if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
        }
    }
    GC_words_allocd += lw;
    UNLOCK();
    ENABLE_SIGNALS();
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        if (init && !GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
        return(result);
    }
}
# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_ignore_off_page(size_t lb)
# else
  char * GC_malloc_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, NORMAL));
}

# if defined(__STDC__) || defined(__cplusplus)
  void * GC_malloc_atomic_ignore_off_page(size_t lb)
# else
  char * GC_malloc_atomic_ignore_off_page(lb)
  register size_t lb;
# endif
{
    return((GC_PTR)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
}
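/*
 * Illustrative usage sketch (hypothetical client code):  these variants are
 * intended for large objects that are otherwise referenced mostly through
 * interior pointers.  The client is expected to keep a pointer to (near) the
 * beginning of the object reachable for as long as the object is live:
 *
 *      double * base = GC_malloc_atomic_ignore_off_page(n * sizeof(double));
 *      ... base must stay reachable; pointers such as base + i, far past
 *          the start of the object, need not be recognized by the
 *          collector ...
 */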
/* Increment GC_words_allocd from code that doesn't have direct access */
/* to GC_arrays.                                                        */
# ifdef __STDC__
void GC_incr_words_allocd(size_t n)
{
    GC_words_allocd += n;
}

/* The same for GC_mem_freed. */
void GC_incr_mem_freed(size_t n)
{
    GC_mem_freed += n;
}
# endif /* __STDC__ */
/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.               */
ptr_t GC_generic_malloc_words_small_inner(lw, k)
register word lw;
register int k;
{
    register ptr_t op;
    register ptr_t *opp;
    register struct obj_kind * kind = GC_obj_kinds + k;

    opp = &(kind -> ok_freelist[lw]);
    if( (op = *opp) == 0 ) {
        if (!GC_is_initialized) {
            GC_init_inner();
        }
        if (kind -> ok_reclaim_list != 0 || GC_alloc_reclaim_list(kind)) {
            op = GC_clear_stack(GC_allocobj((word)lw, k));
        }
        if (op == 0) {
            UNLOCK();
            ENABLE_SIGNALS();
            return ((*GC_oom_fn)(WORDS_TO_BYTES(lw)));
        }
    }
    *opp = obj_link(op);
    obj_link(op) = 0;
    GC_words_allocd += lw;
    return((ptr_t)op);
}
/* Analogous to the above, but assumes a small object size, and        */
/* bypasses MERGE_SIZES mechanism.  Used by gc_inline.h.               */
#ifdef __STDC__
    ptr_t GC_generic_malloc_words_small(size_t lw, int k)
#else
    ptr_t GC_generic_malloc_words_small(lw, k)
    register word lw;
    register int k;
#endif
{
    register ptr_t op;
    DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_words_small_inner(lw, k);
    UNLOCK();
    ENABLE_SIGNALS();
    return((ptr_t)op);
}
#if defined(THREADS) && !defined(SRC_M3)

extern signed_word GC_mem_found;   /* Protected by GC lock.  */

#ifdef PARALLEL_MARK
volatile signed_word GC_words_allocd_tmp = 0;
                        /* Number of words of memory allocated since    */
                        /* we released the GC lock.  Instead of         */
                        /* reacquiring the GC lock just to add this in, */
                        /* we add it in the next time we reacquire      */
                        /* the lock.  (Atomically adding it doesn't     */
                        /* work, since we would have to atomically      */
                        /* update it in GC_malloc, which is too         */
                        /* expensive.)                                   */
#endif /* PARALLEL_MARK */
/* See reclaim.c: */
extern ptr_t GC_reclaim_generic();

/* Return a list of 1 or more objects of the indicated size, linked     */
/* through the first word in the object.  This has the advantage that   */
/* it acquires the allocation lock only once, and may greatly reduce    */
/* time wasted contending for the allocation lock.  Typical usage would */
/* be in a thread that requires many items of the same size.  It would  */
/* keep its own free list in thread-local storage, and call             */
/* GC_malloc_many or friends to replenish it.  (We do not round up      */
/* object sizes, since a call indicates the intention to consume many   */
/* objects of exactly this size.)                                       */
/* We return the free-list by assigning it to *result, since it is      */
/* not safe to return, e.g. a linked list of pointer-free objects,      */
/* since the collector would not retain the entire list if it were      */
/* invoked just as we were returning.                                   */
/* Note that the client should usually clear the link field.            */
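/*
 * Illustrative usage sketch (not part of the collector; my_free_list and
 * my_alloc_16 are hypothetical names):  a thread keeps a private free list,
 * e.g. in thread-local storage, and replenishes it from GC_malloc_many.
 * Objects come back linked through their first word; the client clears the
 * link word before handing an object out:
 *
 *      static void * my_free_list = 0;
 *
 *      void * my_alloc_16(void)
 *      {
 *          void * p = my_free_list;
 *          if (p == 0) {
 *              my_free_list = GC_malloc_many(16);
 *              p = my_free_list;
 *              if (p == 0) return 0;           (allocation failed)
 *          }
 *          my_free_list = *(void **)p;         (follow the link word)
 *          *(void **)p = 0;                    (clear the link field)
 *          return p;
 *      }
 */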
void GC_generic_malloc_many(lb, k, result)
register word lb;
register int k;
ptr_t *result;
{
    ptr_t op;
    ptr_t p;
    ptr_t *opp;
    word lw;
    word my_words_allocd = 0;
    struct obj_kind * ok = &(GC_obj_kinds[k]);
    DCL_LOCK_STATE;

#   if defined(GATHERSTATS) || defined(PARALLEL_MARK)
#     define COUNT_ARG , &my_words_allocd
#   else
#     define COUNT_ARG
#     define NEED_TO_COUNT
#   endif
    if (!SMALL_OBJ(lb)) {
        op = GC_generic_malloc(lb, k);
        if(0 != op) obj_link(op) = 0;
        *result = op;
        return;
    }
    lw = ALIGNED_WORDS(lb);
    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc) {
        ENTER_GC();
        GC_collect_a_little_inner(1);
        EXIT_GC();
    }
    /* First see if we can reclaim a page of objects waiting to be     */
    /* reclaimed.                                                      */
    {
        struct hblk ** rlh = ok -> ok_reclaim_list;
        struct hblk * hbp;
        hdr * hhdr;

        rlh += lw;
        while ((hbp = *rlh) != 0) {
            hhdr = HDR(hbp);
            *rlh = hhdr -> hb_next;
            hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
#           ifdef PARALLEL_MARK
              {
                signed_word my_words_allocd_tmp = GC_words_allocd_tmp;

                GC_ASSERT(my_words_allocd_tmp >= 0);
                /* We only decrement it while holding the GC lock.     */
                /* Thus we can't accidentally adjust it down in more   */
                /* than one thread simultaneously.                     */
                if (my_words_allocd_tmp != 0) {
                  (void)GC_atomic_add(
                              (volatile GC_word *)(&GC_words_allocd_tmp),
                              (GC_word)(-my_words_allocd_tmp));
                  GC_words_allocd += my_words_allocd_tmp;
                }
              }
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              ENABLE_SIGNALS();
              GC_release_mark_lock();
#           endif
            op = GC_reclaim_generic(hbp, hhdr, lw,
                                    ok -> ok_init, 0 COUNT_ARG);
            if (op != 0) {
#             ifdef NEED_TO_COUNT
                /* We are neither gathering statistics, nor marking in */
                /* parallel.  Thus GC_reclaim_generic doesn't count    */
                /* for us.                                              */
                for (p = op; p != 0; p = obj_link(p)) {
                  my_words_allocd += lw;
                }
#             endif
#             if defined(GATHERSTATS)
                /* We also reclaimed memory, so we need to adjust      */
                /* that count.                                          */
                /* This should be atomic, so the results may be        */
                /* inaccurate.                                          */
                GC_mem_found += my_words_allocd;
#             endif
#             ifdef PARALLEL_MARK
                *result = op;
                (void)GC_atomic_add(
                              (volatile GC_word *)(&GC_words_allocd_tmp),
                              (GC_word)(my_words_allocd));
                GC_acquire_mark_lock();
                -- GC_fl_builder_count;
                if (GC_fl_builder_count == 0) GC_notify_all_builder();
                GC_release_mark_lock();
                (void) GC_clear_stack(0);
                return;
#             else
                GC_words_allocd += my_words_allocd;
                goto out;
#             endif
            }
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              DISABLE_SIGNALS();
              LOCK();
              /* GC lock is needed for reclaim list access.  We        */
              /* must decrement fl_builder_count before reacquiring GC */
              /* lock.  Hopefully this path is rare.                   */
#           endif
        }
    }
    /* Next try to use prefix of global free list if there is one.     */
    /* We don't refill it, but we need to use it up before allocating  */
    /* a new block ourselves.                                          */
    opp = &(GC_obj_kinds[k].ok_freelist[lw]);
    if ( (op = *opp) != 0 ) {
        *opp = 0;
        my_words_allocd = 0;
        for (p = op; p != 0; p = obj_link(p)) {
            my_words_allocd += lw;
            if (my_words_allocd >= BODY_SZ) {
                *opp = obj_link(p);
                obj_link(p) = 0;
                break;
            }
        }
        GC_words_allocd += my_words_allocd;
        goto out;
    }
    /* Next try to allocate a new block worth of objects of this size. */
    {
        struct hblk *h = GC_allochblk(lw, k, 0);
        if (h != 0) {
            if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
            GC_words_allocd += BYTES_TO_WORDS(HBLKSIZE)
                                 - BYTES_TO_WORDS(HBLKSIZE) % lw;
#           ifdef PARALLEL_MARK
              GC_acquire_mark_lock();
              ++ GC_fl_builder_count;
              UNLOCK();
              ENABLE_SIGNALS();
              GC_release_mark_lock();
#           endif
            op = GC_build_fl(h, lw, ok -> ok_init, 0);
#           ifdef PARALLEL_MARK
              *result = op;
              GC_acquire_mark_lock();
              -- GC_fl_builder_count;
              if (GC_fl_builder_count == 0) GC_notify_all_builder();
              GC_release_mark_lock();
              (void) GC_clear_stack(0);
              return;
#           else
              goto out;
#           endif
        }
    }
    /* As a last attempt, try allocating a single object.  Note that   */
    /* this may trigger a collection or expand the heap.               */
    op = GC_generic_malloc_inner(lb, k);
    if (0 != op) obj_link(op) = 0;

  out:
    *result = op;
    UNLOCK();
    ENABLE_SIGNALS();
    (void) GC_clear_stack(0);
}
GC_PTR GC_malloc_many(size_t lb)
{
    ptr_t result;
    GC_generic_malloc_many(lb, NORMAL, &result);
    return result;
}

/* Note that the "atomic" version of this would be unsafe, since the   */
/* links would not be seen by the collector.                           */
# endif
/* Allocate lb bytes of pointerful, traced, but not collectable data */
# ifdef __STDC__
    GC_PTR GC_malloc_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_uncollectable(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                 */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_uobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set on free list.  It will be      */
            /* cleared only temporarily during a collection, as a      */
            /* result of the normal free list mark bit clearing.       */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}
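/*
 * Illustrative usage sketch (hypothetical client code):  uncollectable
 * objects are scanned for pointers but never reclaimed automatically, so
 * they suit data that must survive even when the collector cannot see a
 * reference to it; the client releases them explicitly:
 *
 *      struct root_table * roots = GC_malloc_uncollectable(sizeof *roots);
 *      ...
 *      GC_free(roots);
 */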
#ifdef __STDC__
/* Not well tested nor integrated.                      */
/* Debug version is tricky and currently missing.       */
#include <limits.h>

GC_PTR GC_memalign(size_t align, size_t lb)
{
    size_t new_lb;
    size_t offset;
    ptr_t result;

#   ifdef ALIGN_DOUBLE
        if (align <= WORDS_TO_BYTES(2) && lb > align) return GC_malloc(lb);
#   endif
    if (align <= WORDS_TO_BYTES(1)) return GC_malloc(lb);
    if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
        if (align > HBLKSIZE) return GC_oom_fn(LONG_MAX-1024) /* Fail */;
        return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
            /* Will be HBLKSIZE aligned.        */
    }
    /* We could also try to make sure that the real rounded-up object size */
    /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
    new_lb = lb + align - 1;
    result = GC_malloc(new_lb);
    offset = (word)result % align;
    if (offset != 0) {
        offset = align - offset;
        if (!GC_all_interior_pointers) {
            if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
            GC_register_displacement(offset);
        }
    }
    result = (GC_PTR) ((ptr_t)result + offset);
    GC_ASSERT((word)result % align == 0);
    return result;
}
#endif
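/*
 * Illustrative usage sketch (hypothetical client code):  GC_memalign
 * returns collectable storage whose address is a multiple of the requested
 * alignment, within the limits handled above (requests for alignment larger
 * than HBLKSIZE fail through GC_oom_fn):
 *
 *      void * p = GC_memalign(64, 1000);
 *      ... p, if nonzero, is 64-byte aligned and traced like any other
 *          GC_malloc result ...
 */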
# ifdef ATOMIC_UNCOLLECTABLE
/* Allocate lb bytes of pointerfree, untraced, uncollectable data      */
/* This is normally roughly equivalent to the system malloc.           */
/* But it may be useful if malloc is redefined.                        */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic_uncollectable(size_t lb)
# else
    GC_PTR GC_malloc_atomic_uncollectable(lb)
    size_t lb;
# endif
{
    register ptr_t op;
    register ptr_t *opp;
    register word lw;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be */
                  /* collected anyway.                                 */
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_auobjfreelist[lw]);
        FASTLOCK();
        if( FASTLOCK_SUCCEEDED() && (op = *opp) != 0 ) {
            /* See above comment on signals.    */
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_words_allocd += lw;
            /* Mark bit was already set while object was on free list. */
            GC_non_gc_bytes += WORDS_TO_BYTES(lw);
            FASTUNLOCK();
            return((GC_PTR) op);
        }
        FASTUNLOCK();
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    } else {
        op = (ptr_t)GC_generic_malloc((word)lb, AUNCOLLECTABLE);
    }
    if (0 == op) return(0);
    /* We don't need the lock here, since we have an undisguised       */
    /* pointer.  We do need to hold the lock while we adjust           */
    /* mark bits.                                                      */
    {
        register struct hblk * h;

        h = HBLKPTR(op);
        lw = HDR(h) -> hb_sz;

        DISABLE_SIGNALS();
        LOCK();
        GC_set_mark_bit(op);
        GC_non_gc_bytes += WORDS_TO_BYTES(lw);
        UNLOCK();
        ENABLE_SIGNALS();
        return((GC_PTR) op);
    }
}

#endif /* ATOMIC_UNCOLLECTABLE */