glibc-2.38-memalign_fix-1.patch

Submitted By: Xi Ruoyao <xry111 at xry111.site>
Date: 2023-08-13
Initial Package Version: 2.38
Upstream Status: Under review
Origin: Upstream & Self
 - 1/3: https://sourceware.org/git/?p=glibc.git;a=patch;h=542b11058525
 - 2/3: https://sourceware.org/pipermail/libc-alpha/2023-August/150857.html
 - 3/3: Trivial unused code removal
Description: Fixes a regression that made posix_memalign() very slow
             in certain conditions, which broke ffmpeg-based
             applications.
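
The following is an illustrative sketch (not part of the patch) of the
allocation pattern affected by the regression: many aligned allocations,
as made by ffmpeg/mpv during video decoding. The alignment and sizes are
hypothetical and only demonstrate the posix_memalign() usage involved,
not an exact reproducer.

    #include <stdlib.h>

    int
    main (void)
    {
      /* Repeated aligned allocations of similar sizes.  Before these
         fixes, glibc 2.38 could leave the freed remainders produced by
         memalign unmerged and scan bins on each call, making such
         workloads very slow.  */
      for (int i = 0; i < 100000; ++i)
        {
          void *buf = NULL;
          if (posix_memalign (&buf, 64, 65536) != 0)
            return 1;
          free (buf);
        }
      return 0;
    }
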
From fc01478d06658ace8d57e5328c1e717275acfe84 Mon Sep 17 00:00:00 2001
From: Florian Weimer <fweimer@redhat.com>
Date: Fri, 11 Aug 2023 11:18:17 +0200
Subject: [PATCH 1/3] malloc: Enable merging of remainders in memalign (bug
 30723)

Previously, calling _int_free from _int_memalign could put remainders
into the tcache or into fastbins, where they are invisible to the
low-level allocator.  This results in missed merge opportunities
because once these freed chunks become available to the low-level
allocator, further memalign allocations (even of the same size) are
likely obstructing merges.

Furthermore, during forwards merging in _int_memalign, do not
completely give up when the remainder is too small to serve as a
chunk on its own.  We can still give it back if it can be merged
with the following unused chunk.  This makes it more likely that
memalign calls in a loop achieve a compact memory layout,
independently of initial heap layout.

Drop some useless (unsigned long) casts along the way, and tweak
the style to more closely match GNU on changed lines.

Reviewed-by: DJ Delorie <dj@redhat.com>
(cherry picked from commit 542b1105852568c3ebc712225ae78b8c8ba31a78)
---
 malloc/malloc.c | 197 +++++++++++++++++++++++++++++-------------------
 1 file changed, 121 insertions(+), 76 deletions(-)

diff --git a/malloc/malloc.c b/malloc/malloc.c
index e2f1a615a4..948f9759af 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -1086,6 +1086,11 @@ typedef struct malloc_chunk* mchunkptr;
static void* _int_malloc(mstate, size_t);
static void _int_free(mstate, mchunkptr, int);
+static void _int_free_merge_chunk (mstate, mchunkptr, INTERNAL_SIZE_T);
+static INTERNAL_SIZE_T _int_free_create_chunk (mstate,
+ mchunkptr, INTERNAL_SIZE_T,
+ mchunkptr, INTERNAL_SIZE_T);
+static void _int_free_maybe_consolidate (mstate, INTERNAL_SIZE_T);
static void* _int_realloc(mstate, mchunkptr, INTERNAL_SIZE_T,
INTERNAL_SIZE_T);
static void* _int_memalign(mstate, size_t, size_t);
@@ -4637,31 +4642,52 @@ _int_free (mstate av, mchunkptr p, int have_lock)
if (!have_lock)
__libc_lock_lock (av->mutex);
- nextchunk = chunk_at_offset(p, size);
-
- /* Lightweight tests: check whether the block is already the
- top block. */
- if (__glibc_unlikely (p == av->top))
- malloc_printerr ("double free or corruption (top)");
- /* Or whether the next chunk is beyond the boundaries of the arena. */
- if (__builtin_expect (contiguous (av)
- && (char *) nextchunk
- >= ((char *) av->top + chunksize(av->top)), 0))
- malloc_printerr ("double free or corruption (out)");
- /* Or whether the block is actually not marked used. */
- if (__glibc_unlikely (!prev_inuse(nextchunk)))
- malloc_printerr ("double free or corruption (!prev)");
-
- nextsize = chunksize(nextchunk);
- if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
- || __builtin_expect (nextsize >= av->system_mem, 0))
- malloc_printerr ("free(): invalid next size (normal)");
+ _int_free_merge_chunk (av, p, size);
- free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
+ if (!have_lock)
+ __libc_lock_unlock (av->mutex);
+ }
+ /*
+ If the chunk was allocated via mmap, release via munmap().
+ */
+
+ else {
+ munmap_chunk (p);
+ }
+}
+
+/* Try to merge chunk P of SIZE bytes with its neighbors. Put the
+ resulting chunk on the appropriate bin list. P must not be on a
+ bin list yet, and it can be in use. */
+static void
+_int_free_merge_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size)
+{
+ mchunkptr nextchunk = chunk_at_offset(p, size);
+
+ /* Lightweight tests: check whether the block is already the
+ top block. */
+ if (__glibc_unlikely (p == av->top))
+ malloc_printerr ("double free or corruption (top)");
+ /* Or whether the next chunk is beyond the boundaries of the arena. */
+ if (__builtin_expect (contiguous (av)
+ && (char *) nextchunk
+ >= ((char *) av->top + chunksize(av->top)), 0))
+ malloc_printerr ("double free or corruption (out)");
+ /* Or whether the block is actually not marked used. */
+ if (__glibc_unlikely (!prev_inuse(nextchunk)))
+ malloc_printerr ("double free or corruption (!prev)");
+
+ INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
+ if (__builtin_expect (chunksize_nomask (nextchunk) <= CHUNK_HDR_SZ, 0)
+ || __builtin_expect (nextsize >= av->system_mem, 0))
+ malloc_printerr ("free(): invalid next size (normal)");
+
+ free_perturb (chunk2mem(p), size - CHUNK_HDR_SZ);
- /* consolidate backward */
- if (!prev_inuse(p)) {
- prevsize = prev_size (p);
+ /* Consolidate backward. */
+ if (!prev_inuse(p))
+ {
+ INTERNAL_SIZE_T prevsize = prev_size (p);
size += prevsize;
p = chunk_at_offset(p, -((long) prevsize));
if (__glibc_unlikely (chunksize(p) != prevsize))
@@ -4669,9 +4695,25 @@ _int_free (mstate av, mchunkptr p, int have_lock)
unlink_chunk (av, p);
}
- if (nextchunk != av->top) {
+ /* Write the chunk header, maybe after merging with the following chunk. */
+ size = _int_free_create_chunk (av, p, size, nextchunk, nextsize);
+ _int_free_maybe_consolidate (av, size);
+}
+
+/* Create a chunk at P of SIZE bytes, with SIZE potentially increased
+ to cover the immediately following chunk NEXTCHUNK of NEXTSIZE
+ bytes (if NEXTCHUNK is unused). The chunk at P is not actually
+ read and does not have to be initialized. After creation, it is
+ placed on the appropriate bin list. The function returns the size
+ of the new chunk. */
+static INTERNAL_SIZE_T
+_int_free_create_chunk (mstate av, mchunkptr p, INTERNAL_SIZE_T size,
+ mchunkptr nextchunk, INTERNAL_SIZE_T nextsize)
+{
+ if (nextchunk != av->top)
+ {
/* get and clear inuse bit */
- nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
+ bool nextinuse = inuse_bit_at_offset (nextchunk, nextsize);
/* consolidate forward */
if (!nextinuse) {
@@ -4686,8 +4728,8 @@ _int_free (mstate av, mchunkptr p, int have_lock)
been given one chance to be used in malloc.
*/
- bck = unsorted_chunks(av);
- fwd = bck->fd;
+ mchunkptr bck = unsorted_chunks (av);
+ mchunkptr fwd = bck->fd;
if (__glibc_unlikely (fwd->bk != bck))
malloc_printerr ("free(): corrupted unsorted chunks");
p->fd = fwd;
@@ -4706,61 +4748,52 @@ _int_free (mstate av, mchunkptr p, int have_lock)
check_free_chunk(av, p);
}
- /*
- If the chunk borders the current high end of memory,
- consolidate into top
- */
-
- else {
+ else
+ {
+ /* If the chunk borders the current high end of memory,
+ consolidate into top. */
size += nextsize;
set_head(p, size | PREV_INUSE);
av->top = p;
check_chunk(av, p);
}
- /*
- If freeing a large space, consolidate possibly-surrounding
- chunks. Then, if the total unused topmost memory exceeds trim
- threshold, ask malloc_trim to reduce top.
-
- Unless max_fast is 0, we don't know if there are fastbins
- bordering top, so we cannot tell for sure whether threshold
- has been reached unless fastbins are consolidated. But we
- don't want to consolidate on each free. As a compromise,
- consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
- is reached.
- */
+ return size;
+}
- if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
+/* If freeing a large space, consolidate possibly-surrounding
+ chunks. Then, if the total unused topmost memory exceeds trim
+ threshold, ask malloc_trim to reduce top. */
+static void
+_int_free_maybe_consolidate (mstate av, INTERNAL_SIZE_T size)
+{
+ /* Unless max_fast is 0, we don't know if there are fastbins
+ bordering top, so we cannot tell for sure whether threshold has
+ been reached unless fastbins are consolidated. But we don't want
+ to consolidate on each free. As a compromise, consolidation is
+ performed if FASTBIN_CONSOLIDATION_THRESHOLD is reached. */
+ if (size >= FASTBIN_CONSOLIDATION_THRESHOLD)
+ {
if (atomic_load_relaxed (&av->have_fastchunks))
malloc_consolidate(av);
- if (av == &main_arena) {
+ if (av == &main_arena)
+ {
#ifndef MORECORE_CANNOT_TRIM
- if ((unsigned long)(chunksize(av->top)) >=
- (unsigned long)(mp_.trim_threshold))
- systrim(mp_.top_pad, av);
+ if (chunksize (av->top) >= mp_.trim_threshold)
+ systrim (mp_.top_pad, av);
#endif
- } else {
- /* Always try heap_trim(), even if the top chunk is not
- large, because the corresponding heap might go away. */
- heap_info *heap = heap_for_ptr(top(av));
+ }
+ else
+ {
+ /* Always try heap_trim, even if the top chunk is not large,
+ because the corresponding heap might go away. */
+ heap_info *heap = heap_for_ptr (top (av));
- assert(heap->ar_ptr == av);
- heap_trim(heap, mp_.top_pad);
- }
+ assert (heap->ar_ptr == av);
+ heap_trim (heap, mp_.top_pad);
+ }
}
-
- if (!have_lock)
- __libc_lock_unlock (av->mutex);
- }
- /*
- If the chunk was allocated via mmap, release via munmap().
- */
-
- else {
- munmap_chunk (p);
- }
}
/*
@@ -5221,7 +5254,7 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
(av != &main_arena ? NON_MAIN_ARENA : 0));
set_inuse_bit_at_offset (newp, newsize);
set_head_size (p, leadsize | (av != &main_arena ? NON_MAIN_ARENA : 0));
- _int_free (av, p, 1);
+ _int_free_merge_chunk (av, p, leadsize);
p = newp;
assert (newsize >= nb &&
@@ -5232,15 +5265,27 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
if (!chunk_is_mmapped (p))
{
size = chunksize (p);
- if ((unsigned long) (size) > (unsigned long) (nb + MINSIZE))
+ mchunkptr nextchunk = chunk_at_offset(p, size);
+ INTERNAL_SIZE_T nextsize = chunksize(nextchunk);
+ if (size > nb)
{
remainder_size = size - nb;
- remainder = chunk_at_offset (p, nb);
- set_head (remainder, remainder_size | PREV_INUSE |
- (av != &main_arena ? NON_MAIN_ARENA : 0));
- set_head_size (p, nb);
- _int_free (av, remainder, 1);
- }
+ if (remainder_size >= MINSIZE
+ || nextchunk == av->top
+ || !inuse_bit_at_offset (nextchunk, nextsize))
+ {
+ /* We can only give back the tail if it is larger than
+ MINSIZE, or if the following chunk is unused (top
+ chunk or unused in-heap chunk). Otherwise we would
+ create a chunk that is smaller than MINSIZE. */
+ remainder = chunk_at_offset (p, nb);
+ set_head_size (p, nb);
+ remainder_size = _int_free_create_chunk (av, remainder,
+ remainder_size,
+ nextchunk, nextsize);
+ _int_free_maybe_consolidate (av, remainder_size);
+ }
+ }
}
check_inuse_chunk (av, p);
--
2.41.0

From b37e836b7cc2dba672e1de1cc7e076ba1c712614 Mon Sep 17 00:00:00 2001
From: Florian Weimer <fweimer@redhat.com>
Date: Fri, 11 Aug 2023 17:48:13 +0200
Subject: [PATCH 2/3] malloc: Remove bin scanning from memalign (bug 30723)

On the test workload (mpv --cache=yes with VP9 video decoding), the
bin scanning has a very poor success rate (less than 2%). The tcache
scanning has about 50% success rate, so keep that.

Update comments in malloc/tst-memalign-2 to indicate the purpose
of the tests. Even with the scanning removed, the additional
merging opportunities since commit 542b1105852568c3ebc712225ae78b
("malloc: Enable merging of remainders in memalign (bug 30723)")
are sufficient to pass the existing large bins test.

Link: https://sourceware.org/pipermail/libc-alpha/2023-August/150857.html
---
 malloc/malloc.c | 127 ++--------------------------------------
 malloc/tst-memalign-2.c | 7 ++-
 2 files changed, 10 insertions(+), 124 deletions(-)

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 948f9759af..9c2cab7a59 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -5082,7 +5082,6 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
mchunkptr remainder; /* spare room at end to split off */
unsigned long remainder_size; /* its size */
INTERNAL_SIZE_T size;
- mchunkptr victim;
nb = checked_request2size (bytes);
if (nb == 0)
@@ -5101,129 +5100,13 @@ _int_memalign (mstate av, size_t alignment, size_t bytes)
we don't find anything in those bins, the common malloc code will
scan starting at 2x. */
- /* This will be set if we found a candidate chunk. */
- victim = NULL;
+ /* Call malloc with worst case padding to hit alignment. */
+ m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
- /* Fast bins are singly-linked, hard to remove a chunk from the middle
- and unlikely to meet our alignment requirements. We have not done
- any experimentation with searching for aligned fastbins. */
+ if (m == 0)
+ return 0; /* propagate failure */
- if (av != NULL)
- {
- int first_bin_index;
- int first_largebin_index;
- int last_bin_index;
-
- if (in_smallbin_range (nb))
- first_bin_index = smallbin_index (nb);
- else
- first_bin_index = largebin_index (nb);
-
- if (in_smallbin_range (nb * 2))
- last_bin_index = smallbin_index (nb * 2);
- else
- last_bin_index = largebin_index (nb * 2);
-
- first_largebin_index = largebin_index (MIN_LARGE_SIZE);
-
- int victim_index; /* its bin index */
-
- for (victim_index = first_bin_index;
- victim_index < last_bin_index;
- victim_index ++)
- {
- victim = NULL;
-
- if (victim_index < first_largebin_index)
- {
- /* Check small bins. Small bin chunks are doubly-linked despite
- being the same size. */
-
- mchunkptr fwd; /* misc temp for linking */
- mchunkptr bck; /* misc temp for linking */
-
- bck = bin_at (av, victim_index);
- fwd = bck->fd;
- while (fwd != bck)
- {
- if (chunk_ok_for_memalign (fwd, alignment, nb) > 0)
- {
- victim = fwd;
-
- /* Unlink it */
- victim->fd->bk = victim->bk;
- victim->bk->fd = victim->fd;
- break;
- }
-
- fwd = fwd->fd;
- }
- }
- else
- {
- /* Check large bins. */
- mchunkptr fwd; /* misc temp for linking */
- mchunkptr bck; /* misc temp for linking */
- mchunkptr best = NULL;
- size_t best_size = 0;
-
- bck = bin_at (av, victim_index);
- fwd = bck->fd;
-
- while (fwd != bck)
- {
- int extra;
-
- if (chunksize (fwd) < nb)
- break;
- extra = chunk_ok_for_memalign (fwd, alignment, nb);
- if (extra > 0
- && (extra <= best_size || best == NULL))
- {
- best = fwd;
- best_size = extra;
- }
-
- fwd = fwd->fd;
- }
- victim = best;
-
- if (victim != NULL)
- {
- unlink_chunk (av, victim);
- break;
- }
- }
-
- if (victim != NULL)
- break;
- }
- }
-
- /* Strategy: find a spot within that chunk that meets the alignment
- request, and then possibly free the leading and trailing space.
- This strategy is incredibly costly and can lead to external
- fragmentation if header and footer chunks are unused. */
-
- if (victim != NULL)
- {
- p = victim;
- m = chunk2mem (p);
- set_inuse (p);
- if (av != &main_arena)
- set_non_main_arena (p);
- }
- else
- {
- /* Call malloc with worst case padding to hit alignment. */
-
- m = (char *) (_int_malloc (av, nb + alignment + MINSIZE));
-
- if (m == 0)
- return 0; /* propagate failure */
-
- p = mem2chunk (m);
- }
+ p = mem2chunk (m);
if ((((unsigned long) (m)) % alignment) != 0) /* misaligned */
{
diff --git a/malloc/tst-memalign-2.c b/malloc/tst-memalign-2.c
index f229283dbf..ecd6fa249e 100644
--- a/malloc/tst-memalign-2.c
+++ b/malloc/tst-memalign-2.c
@@ -86,7 +86,8 @@ do_test (void)
TEST_VERIFY (tcache_allocs[i].ptr1 == tcache_allocs[i].ptr2);
}
- /* Test for non-head tcache hits. */
+ /* Test for non-head tcache hits. This exercises the memalign
+ scanning code to find matching allocations. */
for (i = 0; i < array_length (ptr); ++ i)
{
if (i == 4)
@@ -113,7 +114,9 @@ do_test (void)
free (p);
TEST_VERIFY (count > 0);
- /* Large bins test. */
+ /* Large bins test. This verifies that the over-allocated parts
+ that memalign releases for future allocations can be reused by
+ memalign itself at least in some cases. */
for (i = 0; i < LN; ++ i)
{
--
2.41.0

From 26973f7b09c33e67f6bcbc79371796c8dd334528 Mon Sep 17 00:00:00 2001
From: Xi Ruoyao <xry111@xry111.site>
Date: Mon, 14 Aug 2023 11:05:18 +0800
Subject: [PATCH 3/3] malloc: Remove unused functions and variables

Remove unused chunk_ok_for_memalign function and unused local variables
in _int_free.

Signed-off-by: Xi Ruoyao <xry111@xry111.site>
---
 malloc/malloc.c | 42 ------------------------------------------
 1 file changed, 42 deletions(-)

diff --git a/malloc/malloc.c b/malloc/malloc.c
index 9c2cab7a59..d0bbbf3710 100644
--- a/malloc/malloc.c
+++ b/malloc/malloc.c
@@ -4488,12 +4488,6 @@ _int_free (mstate av, mchunkptr p, int have_lock)
{
INTERNAL_SIZE_T size; /* its size */
mfastbinptr *fb; /* associated fastbin */
- mchunkptr nextchunk; /* next contiguous chunk */
- INTERNAL_SIZE_T nextsize; /* its size */
- int nextinuse; /* true if nextchunk is used */
- INTERNAL_SIZE_T prevsize; /* size of previous contiguous chunk */
- mchunkptr bck; /* misc temp for linking */
- mchunkptr fwd; /* misc temp for linking */
size = chunksize (p);
@@ -5032,42 +5026,6 @@ _int_realloc (mstate av, mchunkptr oldp, INTERNAL_SIZE_T oldsize,
------------------------------ memalign ------------------------------
*/
-/* Returns 0 if the chunk is not and does not contain the requested
- aligned sub-chunk, else returns the amount of "waste" from
- trimming. NB is the *chunk* byte size, not the user byte
- size. */
-static size_t
-chunk_ok_for_memalign (mchunkptr p, size_t alignment, size_t nb)
-{
- void *m = chunk2mem (p);
- INTERNAL_SIZE_T size = chunksize (p);
- void *aligned_m = m;
-
- if (__glibc_unlikely (misaligned_chunk (p)))
- malloc_printerr ("_int_memalign(): unaligned chunk detected");
-
- aligned_m = PTR_ALIGN_UP (m, alignment);
-
- INTERNAL_SIZE_T front_extra = (intptr_t) aligned_m - (intptr_t) m;
-
- /* We can't trim off the front as it's too small. */
- if (front_extra > 0 && front_extra < MINSIZE)
- return 0;
-
- /* If it's a perfect fit, it's an exception to the return value rule
- (we would return zero waste, which looks like "not usable"), so
- handle it here by returning a small non-zero value instead. */
- if (size == nb && front_extra == 0)
- return 1;
-
- /* If the block we need fits in the chunk, calculate total waste. */
- if (size > nb + front_extra)
- return size - nb;
-
- /* Can't use this chunk. */
- return 0;
-}
-
/* BYTES is user requested bytes, not requested chunksize bytes. */
static void *
_int_memalign (mstate av, size_t alignment, size_t bytes)
--
2.41.0
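
For reference (not part of the patch): in a Linux From Scratch style build,
a file like this is normally applied from within the unpacked glibc-2.38
source tree with a command along the lines of
"patch -Np1 -i ../glibc-2.38-memalign_fix-1.patch", assuming the patch file
was downloaded into the parent directory.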