/*
 *  bootmem - A boot-time physical memory allocator and configurator
 *
 *  Copyright (C) 1999 Ingo Molnar
 *                1999 Kanoj Sarcar, SGI
 *                2008 Johannes Weiner
 *
 * Access to this subsystem has to be serialized externally (which is true
 * for the boot process anyway).
 */
#include <linux/init.h>
#include <linux/pfn.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <linux/kmemleak.h>
#include <linux/range.h>
#include <linux/bug.h>
#include <linux/io.h>
#include <linux/bootmem.h>

#include "internal.h"

#ifndef CONFIG_NEED_MULTIPLE_NODES
struct pglist_data __refdata contig_page_data = {
	.bdata = &bootmem_node_data[0]
};
EXPORT_SYMBOL(contig_page_data);
#endif

unsigned long max_low_pfn;
unsigned long min_low_pfn;
unsigned long max_pfn;
unsigned long long max_possible_pfn;

bootmem_data_t bootmem_node_data[MAX_NUMNODES] __initdata;

static struct list_head bdata_list __initdata = LIST_HEAD_INIT(bdata_list);

static int bootmem_debug;

static int __init bootmem_debug_setup(char *buf)
{
	bootmem_debug = 1;
	return 0;
}
early_param("bootmem_debug", bootmem_debug_setup);

#define bdebug(fmt, args...) ({				\
	if (unlikely(bootmem_debug))			\
		pr_info("bootmem::%s " fmt,		\
			__func__, ## args);		\
})
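
/*
 * Usage note (editorial): booting with "bootmem_debug" on the kernel
 * command line sets bootmem_debug via the early_param() hook above,
 * enabling the bdebug() tracing used throughout this file. Without the
 * parameter, every bdebug() compiles to a branch that is never taken.
 */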

static unsigned long __init bootmap_bytes(unsigned long pages)
{
	unsigned long bytes = DIV_ROUND_UP(pages, 8);

	return ALIGN(bytes, sizeof(long));
}

/**
 * bootmem_bootmap_pages - calculate bitmap size in pages
 * @pages: number of pages the bitmap has to represent
 */
unsigned long __init bootmem_bootmap_pages(unsigned long pages)
{
	unsigned long bytes = bootmap_bytes(pages);

	return PAGE_ALIGN(bytes) >> PAGE_SHIFT;
}
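
/*
 * Example (illustrative, assuming 4 KiB pages and 64-bit longs): a node
 * spanning 1 GiB covers 262144 page frames, so bootmap_bytes(262144)
 * returns 262144 / 8 = 32768 bytes (already long-aligned), and
 * bootmem_bootmap_pages(262144) returns 32768 >> 12 = 8 bitmap pages.
 */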

/*
 * link bdata in order
 */
static void __init link_bootmem(bootmem_data_t *bdata)
{
	bootmem_data_t *ent;

	list_for_each_entry(ent, &bdata_list, list) {
		if (bdata->node_min_pfn < ent->node_min_pfn) {
			list_add_tail(&bdata->list, &ent->list);
			return;
		}
	}
	list_add_tail(&bdata->list, &bdata_list);
}

/*
 * Called once to set up the allocator itself.
 */
static unsigned long __init init_bootmem_core(bootmem_data_t *bdata,
	unsigned long mapstart, unsigned long start, unsigned long end)
{
	unsigned long mapsize;

	mminit_validate_memmodel_limits(&start, &end);
	bdata->node_bootmem_map = phys_to_virt(PFN_PHYS(mapstart));
	bdata->node_min_pfn = start;
	bdata->node_low_pfn = end;
	link_bootmem(bdata);

	/*
	 * Initially all pages are reserved - setup_arch() has to
	 * register free RAM areas explicitly.
	 */
	mapsize = bootmap_bytes(end - start);
	memset(bdata->node_bootmem_map, 0xff, mapsize);

	bdebug("nid=%td start=%lx map=%lx end=%lx mapsize=%lx\n",
		bdata - bootmem_node_data, start, mapstart, end, mapsize);

	return mapsize;
}

/**
 * init_bootmem_node - register a node as boot memory
 * @pgdat: node to register
 * @freepfn: pfn where the bitmap for this node is to be placed
 * @startpfn: first pfn on the node
 * @endpfn: first pfn after the node
 *
 * Returns the number of bytes needed to hold the bitmap for this node.
 */
unsigned long __init init_bootmem_node(pg_data_t *pgdat, unsigned long freepfn,
				unsigned long startpfn, unsigned long endpfn)
{
	return init_bootmem_core(pgdat->bdata, freepfn, startpfn, endpfn);
}

/**
 * init_bootmem - register boot memory
 * @start: pfn where the bitmap is to be placed
 * @pages: number of available physical pages
 *
 * Returns the number of bytes needed to hold the bitmap.
 */
unsigned long __init init_bootmem(unsigned long start, unsigned long pages)
{
	max_low_pfn = pages;
	min_low_pfn = start;
	return init_bootmem_core(NODE_DATA(0)->bdata, start, 0, pages);
}

/**
 * free_bootmem_late - free bootmem pages directly to page allocator
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * This is only useful when the bootmem allocator has already been torn
 * down, but we are still initializing the system. Pages are given directly
 * to the page allocator, no bootmem metadata is updated because it is gone.
 */
void __init free_bootmem_late(unsigned long physaddr, unsigned long size)
{
	unsigned long cursor, end;

	kmemleak_free_part_phys(physaddr, size);

	cursor = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	for (; cursor < end; cursor++) {
		__free_pages_bootmem(pfn_to_page(cursor), cursor, 0);
		totalram_pages++;
	}
}
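
/*
 * Note (illustrative): PFN_UP()/PFN_DOWN() above round the range inward,
 * so partially covered pages at either end are never handed to the page
 * allocator. E.g. freeing physaddr 0x1800, size 0x3000 with 4 KiB pages
 * releases only pfns 2 and 3 (bytes 0x2000..0x3fff).
 */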

static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
{
	struct page *page;
	unsigned long *map, start, end, pages, cur, count = 0;

	if (!bdata->node_bootmem_map)
		return 0;

	map = bdata->node_bootmem_map;
	start = bdata->node_min_pfn;
	end = bdata->node_low_pfn;

	bdebug("nid=%td start=%lx end=%lx\n",
		bdata - bootmem_node_data, start, end);

	while (start < end) {
		unsigned long idx, vec;
		unsigned shift;

		idx = start - bdata->node_min_pfn;
		shift = idx & (BITS_PER_LONG - 1);
		/*
		 * vec holds at most BITS_PER_LONG map bits,
		 * bit 0 corresponds to start.
		 */
		vec = ~map[idx / BITS_PER_LONG];

		if (shift) {
			vec >>= shift;
			if (end - start >= BITS_PER_LONG)
				vec |= ~map[idx / BITS_PER_LONG + 1] <<
					(BITS_PER_LONG - shift);
		}
		/*
		 * If we have a properly aligned and fully unreserved
		 * BITS_PER_LONG block of pages in front of us, free
		 * it in one go.
		 */
		if (IS_ALIGNED(start, BITS_PER_LONG) && vec == ~0UL) {
			int order = ilog2(BITS_PER_LONG);

			__free_pages_bootmem(pfn_to_page(start), start, order);
			count += BITS_PER_LONG;
			start += BITS_PER_LONG;
		} else {
			cur = start;
			start = ALIGN(start + 1, BITS_PER_LONG);
			while (vec && cur != start) {
				if (vec & 1) {
					page = pfn_to_page(cur);
					__free_pages_bootmem(page, cur, 0);
					count++;
				}
				vec >>= 1;
				++cur;
			}
		}
	}

	cur = bdata->node_min_pfn;
	page = virt_to_page(bdata->node_bootmem_map);
	pages = bdata->node_low_pfn - bdata->node_min_pfn;
	pages = bootmem_bootmap_pages(pages);
	count += pages;
	while (pages--)
		__free_pages_bootmem(page++, cur++, 0);
	bdata->node_bootmem_map = NULL;

	bdebug("nid=%td released=%lx\n", bdata - bootmem_node_data, count);

	return count;
}

static int reset_managed_pages_done __initdata;

void reset_node_managed_pages(pg_data_t *pgdat)
{
	struct zone *z;

	for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
		z->managed_pages = 0;
}

void __init reset_all_zones_managed_pages(void)
{
	struct pglist_data *pgdat;

	if (reset_managed_pages_done)
		return;

	for_each_online_pgdat(pgdat)
		reset_node_managed_pages(pgdat);

	reset_managed_pages_done = 1;
}

/**
 * free_all_bootmem - release free pages to the buddy allocator
 *
 * Returns the number of pages actually released.
 */
unsigned long __init free_all_bootmem(void)
{
	unsigned long total_pages = 0;
	bootmem_data_t *bdata;

	reset_all_zones_managed_pages();

	list_for_each_entry(bdata, &bdata_list, list)
		total_pages += free_all_bootmem_core(bdata);

	totalram_pages += total_pages;

	return total_pages;
}

static void __init __free(bootmem_data_t *bdata,
			unsigned long sidx, unsigned long eidx)
{
	unsigned long idx;

	bdebug("nid=%td start=%lx end=%lx\n", bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn);

	if (WARN_ON(bdata->node_bootmem_map == NULL))
		return;

	if (bdata->hint_idx > sidx)
		bdata->hint_idx = sidx;

	for (idx = sidx; idx < eidx; idx++)
		if (!test_and_clear_bit(idx, bdata->node_bootmem_map))
			BUG();
}

static int __init __reserve(bootmem_data_t *bdata, unsigned long sidx,
			unsigned long eidx, int flags)
{
	unsigned long idx;
	int exclusive = flags & BOOTMEM_EXCLUSIVE;

	bdebug("nid=%td start=%lx end=%lx flags=%x\n",
		bdata - bootmem_node_data,
		sidx + bdata->node_min_pfn,
		eidx + bdata->node_min_pfn,
		flags);

	if (WARN_ON(bdata->node_bootmem_map == NULL))
		return 0;

	for (idx = sidx; idx < eidx; idx++)
		if (test_and_set_bit(idx, bdata->node_bootmem_map)) {
			if (exclusive) {
				__free(bdata, sidx, idx);
				return -EBUSY;
			}
			bdebug("silent double reserve of PFN %lx\n",
				idx + bdata->node_min_pfn);
		}
	return 0;
}

static int __init mark_bootmem_node(bootmem_data_t *bdata,
				unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long sidx, eidx;

	bdebug("nid=%td start=%lx end=%lx reserve=%d flags=%x\n",
		bdata - bootmem_node_data, start, end, reserve, flags);

	BUG_ON(start < bdata->node_min_pfn);
	BUG_ON(end > bdata->node_low_pfn);

	sidx = start - bdata->node_min_pfn;
	eidx = end - bdata->node_min_pfn;

	if (reserve)
		return __reserve(bdata, sidx, eidx, flags);
	else
		__free(bdata, sidx, eidx);
	return 0;
}
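
/*
 * Note (illustrative): mark_bootmem() below walks bdata_list, which
 * link_bootmem() keeps sorted by node_min_pfn, and clips [start, end)
 * against each node it intersects. If an exclusive reservation fails
 * part-way through, the already reserved prefix [start, pos) is freed
 * again before the error is returned.
 */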
static int __init mark_bootmem(unsigned long start, unsigned long end,
				int reserve, int flags)
{
	unsigned long pos;
	bootmem_data_t *bdata;

	pos = start;
	list_for_each_entry(bdata, &bdata_list, list) {
		int err;
		unsigned long max;

		if (pos < bdata->node_min_pfn ||
		    pos >= bdata->node_low_pfn) {
			BUG_ON(pos != start);
			continue;
		}

		max = min(bdata->node_low_pfn, end);

		err = mark_bootmem_node(bdata, pos, max, reserve, flags);
		if (reserve && err) {
			mark_bootmem(start, pos, 0, 0);
			return err;
		}

		if (max == end)
			return 0;
		pos = bdata->node_low_pfn;
	}
	BUG();
}

/**
 * free_bootmem_node - mark a page range as usable
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must reside completely on the specified node.
 */
void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
			      unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part_phys(physaddr, size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem_node(pgdat->bdata, start, end, 0, 0);
}

/**
 * free_bootmem - mark a page range as usable
 * @physaddr: starting physical address of the range
 * @size: size of the range in bytes
 *
 * Partial pages will be considered reserved and left as they are.
 *
 * The range must be contiguous but may span node boundaries.
 */
void __init free_bootmem(unsigned long physaddr, unsigned long size)
{
	unsigned long start, end;

	kmemleak_free_part_phys(physaddr, size);

	start = PFN_UP(physaddr);
	end = PFN_DOWN(physaddr + size);

	mark_bootmem(start, end, 0, 0);
}

/**
 * reserve_bootmem_node - mark a page range as reserved
 * @pgdat: node the range resides on
 * @physaddr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must reside completely on the specified node.
 */
int __init reserve_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
				 unsigned long size, int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(physaddr);
	end = PFN_UP(physaddr + size);

	return mark_bootmem_node(pgdat->bdata, start, end, 1, flags);
}

/**
 * reserve_bootmem - mark a page range as reserved
 * @addr: starting address of the range
 * @size: size of the range in bytes
 * @flags: reservation flags (see linux/bootmem.h)
 *
 * Partial pages will be reserved.
 *
 * The range must be contiguous but may span node boundaries.
 */
int __init reserve_bootmem(unsigned long addr, unsigned long size,
			    int flags)
{
	unsigned long start, end;

	start = PFN_DOWN(addr);
	end = PFN_UP(addr + size);

	return mark_bootmem(start, end, 1, flags);
}
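
/*
 * Example (illustrative): note the asymmetric rounding. reserve_bootmem()
 * rounds outward (PFN_DOWN/PFN_UP), so reserving addr 0x2800, size 0x1000
 * marks pfns 2 and 3; free_bootmem() rounds inward (PFN_UP/PFN_DOWN), so
 * the matching free touches no page at all unless the range covers whole
 * pages. Reservations are therefore always at least as large as the
 * request, and frees never release a partially covered page.
 */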

static unsigned long __init align_idx(struct bootmem_data *bdata,
				      unsigned long idx, unsigned long step)
{
	unsigned long base = bdata->node_min_pfn;

	/*
	 * Align the index with respect to the node start so that the
	 * combination of both satisfies the requested alignment.
	 */

	return ALIGN(base + idx, step) - base;
}

static unsigned long __init align_off(struct bootmem_data *bdata,
				      unsigned long off, unsigned long align)
{
	unsigned long base = PFN_PHYS(bdata->node_min_pfn);

	/* Same as align_idx for byte offsets */

	return ALIGN(base + off, align) - base;
}
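
/*
 * Example (illustrative): with node_min_pfn = 0x1f2 and step = 4,
 * align_idx(bdata, 5, 4) computes ALIGN(0x1f7, 4) - 0x1f2 = 6, so the
 * resulting absolute pfn 0x1f8 is 4-aligned even though neither the node
 * base nor the index is. Aligning the bare index would only be correct
 * for nodes that happen to start on an aligned pfn.
 */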

static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
					unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	unsigned long fallback = 0;
	unsigned long min, max, start, sidx, midx, step;

	bdebug("nid=%td size=%lx [%lu pages] align=%lx goal=%lx limit=%lx\n",
		bdata - bootmem_node_data, size, PAGE_ALIGN(size) >> PAGE_SHIFT,
		align, goal, limit);

	BUG_ON(!size);
	BUG_ON(align & (align - 1));
	BUG_ON(limit && goal + size > limit);

	if (!bdata->node_bootmem_map)
		return NULL;

	min = bdata->node_min_pfn;
	max = bdata->node_low_pfn;

	goal >>= PAGE_SHIFT;
	limit >>= PAGE_SHIFT;

	if (limit && max > limit)
		max = limit;
	if (max <= min)
		return NULL;

	step = max(align >> PAGE_SHIFT, 1UL);

	if (goal && min < goal && goal < max)
		start = ALIGN(goal, step);
	else
		start = ALIGN(min, step);

	sidx = start - bdata->node_min_pfn;
	midx = max - bdata->node_min_pfn;

	if (bdata->hint_idx > sidx) {
		/*
		 * Handle the valid case of sidx being zero and still
		 * catch the fallback below.
		 */
		fallback = sidx + 1;
		sidx = align_idx(bdata, bdata->hint_idx, step);
	}

	while (1) {
		int merge;
		void *region;
		unsigned long eidx, i, start_off, end_off;
find_block:
		sidx = find_next_zero_bit(bdata->node_bootmem_map, midx, sidx);
		sidx = align_idx(bdata, sidx, step);
		eidx = sidx + PFN_UP(size);

		if (sidx >= midx || eidx > midx)
			break;

		for (i = sidx; i < eidx; i++)
			if (test_bit(i, bdata->node_bootmem_map)) {
				sidx = align_idx(bdata, i, step);
				if (sidx == i)
					sidx += step;
				goto find_block;
			}

		if (bdata->last_end_off & (PAGE_SIZE - 1) &&
				PFN_DOWN(bdata->last_end_off) + 1 == sidx)
			start_off = align_off(bdata, bdata->last_end_off, align);
		else
			start_off = PFN_PHYS(sidx);

		merge = PFN_DOWN(start_off) < sidx;
		end_off = start_off + size;

		bdata->last_end_off = end_off;
		bdata->hint_idx = PFN_UP(end_off);

		/*
		 * Reserve the area now:
		 */
		if (__reserve(bdata, PFN_DOWN(start_off) + merge,
				PFN_UP(end_off), BOOTMEM_EXCLUSIVE))
			BUG();

		region = phys_to_virt(PFN_PHYS(bdata->node_min_pfn) +
				start_off);
		memset(region, 0, size);
		/*
		 * The min_count is set to 0 so that bootmem allocated blocks
		 * are never reported as leaks.
		 */
		kmemleak_alloc(region, size, 0, 0);
		return region;
	}

	if (fallback) {
		sidx = align_idx(bdata, fallback - 1, step);
		fallback = 0;
		goto find_block;
	}

	return NULL;
}
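
/*
 * Note (illustrative): the last_end_off/merge handling above lets small
 * allocations share a page. If a previous allocation ended at byte offset
 * 0x2100 into the node and the next free fit starts at pfn index 3, the
 * new region is instead placed at align_off(0x2100, align) inside the
 * partially used page 2; "merge" then makes __reserve() start at index 3
 * so the already reserved page is not re-reserved (BOOTMEM_EXCLUSIVE
 * would otherwise make the reservation fail).
 */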

static void * __init alloc_bootmem_core(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	bootmem_data_t *bdata;
	void *region;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc(size, GFP_NOWAIT);

	list_for_each_entry(bdata, &bdata_list, list) {
		if (goal && bdata->node_low_pfn <= PFN_DOWN(goal))
			continue;
		if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
			break;

		region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
		if (region)
			return region;
	}

	return NULL;
}

static void * __init ___alloc_bootmem_nopanic(unsigned long size,
					unsigned long align,
					unsigned long goal,
					unsigned long limit)
{
	void *ptr;

restart:
	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;
	if (goal) {
		goal = 0;
		goto restart;
	}

	return NULL;
}

/**
 * __alloc_bootmem_nopanic - allocate boot memory without panicking
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * Returns NULL on failure.
 */
void * __init __alloc_bootmem_nopanic(unsigned long size, unsigned long align,
					unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem_nopanic(size, align, goal, limit);
}

static void * __init ___alloc_bootmem(unsigned long size, unsigned long align,
					unsigned long goal, unsigned long limit)
{
	void *mem = ___alloc_bootmem_nopanic(size, align, goal, limit);

	if (mem)
		return mem;
	/*
	 * Whoops, we cannot satisfy the allocation request.
	 */
	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem - allocate boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem(unsigned long size, unsigned long align,
			      unsigned long goal)
{
	unsigned long limit = 0;

	return ___alloc_bootmem(size, align, goal, limit);
}

void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
				unsigned long size, unsigned long align,
				unsigned long goal, unsigned long limit)
{
	void *ptr;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
again:

	/* do not panic in alloc_bootmem_bdata() */
	if (limit && goal + size > limit)
		limit = 0;

	ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
	if (ptr)
		return ptr;

	ptr = alloc_bootmem_core(size, align, goal, limit);
	if (ptr)
		return ptr;

	if (goal) {
		goal = 0;
		goto again;
	}

	return NULL;
}

void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
}

void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				    unsigned long align, unsigned long goal,
				    unsigned long limit)
{
	void *ptr;

	/* Pass the limit through; dropping it here would silently ignore
	 * callers' address constraints (e.g. ARCH_LOW_ADDRESS_LIMIT). */
	ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
	if (ptr)
		return ptr;

	pr_alert("bootmem alloc of %lu bytes failed!\n", size);
	panic("Out of memory");
	return NULL;
}

/**
 * __alloc_bootmem_node - allocate boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
}

void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
				   unsigned long align, unsigned long goal)
{
#ifdef MAX_DMA32_PFN
	unsigned long end_pfn;

	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	/* update goal according to MAX_DMA32_PFN */
	end_pfn = pgdat_end_pfn(pgdat);

	if (end_pfn > MAX_DMA32_PFN + (128 >> (20 - PAGE_SHIFT)) &&
	    (goal >> PAGE_SHIFT) < MAX_DMA32_PFN) {
		void *ptr;
		unsigned long new_goal;

		new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
		ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
						 new_goal, 0);
		if (ptr)
			return ptr;
	}
#endif

	return __alloc_bootmem_node(pgdat, size, align, goal);
}

/**
 * __alloc_bootmem_low - allocate low boot memory
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may happen on any node in the system.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
				  unsigned long goal)
{
	return ___alloc_bootmem(size, align, goal, ARCH_LOW_ADDRESS_LIMIT);
}

void * __init __alloc_bootmem_low_nopanic(unsigned long size,
					  unsigned long align,
					  unsigned long goal)
{
	return ___alloc_bootmem_nopanic(size, align, goal,
					ARCH_LOW_ADDRESS_LIMIT);
}

/**
 * __alloc_bootmem_low_node - allocate low boot memory from a specific node
 * @pgdat: node to allocate from
 * @size: size of the request in bytes
 * @align: alignment of the region
 * @goal: preferred starting address of the region
 *
 * The goal is dropped if it can not be satisfied and the allocation will
 * fall back to memory below @goal.
 *
 * Allocation may fall back to any node in the system if the specified node
 * can not hold the requested memory.
 *
 * The function panics if the request can not be satisfied.
 */
void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
				       unsigned long align, unsigned long goal)
{
	if (WARN_ON_ONCE(slab_is_available()))
		return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);

	return ___alloc_bootmem_node(pgdat, size, align,
				     goal, ARCH_LOW_ADDRESS_LIMIT);
}
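
/*
 * Typical lifecycle (illustrative sketch, not part of this file): an
 * architecture's setup_arch() would use the API above roughly like this,
 * assuming a single node starting at pfn 0 and an arch-chosen bitmap
 * location:
 *
 *	init_bootmem(bitmap_pfn, max_low_pfn);	      (all pages reserved)
 *	free_bootmem(ram_start, ram_size);	      (register usable RAM)
 *	reserve_bootmem(kernel_start, kernel_size, BOOTMEM_DEFAULT);
 *	ptr = __alloc_bootmem(1UL << 20, PAGE_SIZE, 0);  (early allocation)
 *	...
 *	free_all_bootmem();	    (hand everything to the buddy allocator)
 */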