/*
 * Copyright (c) 2010-2017 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * This implementation uses the binary buddy system to manage its heap.
 * Descriptions of the buddy system can be found in the following works :
 * - "UNIX Internals: The New Frontiers", by Uresh Vahalia.
 * - "Dynamic Storage Allocation: A Survey and Critical Review",
 *   by Paul R. Wilson, Mark S. Johnstone, Michael Neely, and David Boles.
 *
 * In addition, this allocator uses per-CPU pools of pages for order 0
 * (i.e. single page) allocations. These pools act as caches (but are named
 * differently to avoid confusion with CPU caches) that reduce contention on
 * multiprocessor systems. When a pool is empty and cannot provide a page,
 * it is filled by transferring multiple pages from the backend buddy system.
 * The symmetric case is handled likewise.
 */

#include <assert.h>
#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <kern/init.h>
#include <kern/list.h>
#include <kern/log.h>
#include <kern/macros.h>
#include <kern/mutex.h>
#include <kern/panic.h>
#include <kern/printf.h>
#include <kern/shell.h>
#include <kern/thread.h>
#include <machine/boot.h>
#include <machine/cpu.h>
#include <machine/page.h>
#include <machine/pmem.h>
#include <machine/types.h>
#include <vm/vm_page.h>

/*
 * Number of free block lists per zone.
 */
#define VM_PAGE_NR_FREE_LISTS 11

/*
 * The size of a CPU pool is computed by dividing the number of pages in its
 * containing zone by this value.
 */
#define VM_PAGE_CPU_POOL_RATIO 1024

/*
 * Maximum number of pages in a CPU pool.
 */
#define VM_PAGE_CPU_POOL_MAX_SIZE 128

/*
 * The transfer size of a CPU pool is computed by dividing the pool size by
 * this value.
 */
#define VM_PAGE_CPU_POOL_TRANSFER_RATIO 2

/*
 * Per-processor cache of pages.
 */
struct vm_page_cpu_pool {
    alignas(CPU_L1_SIZE) struct mutex lock;
    int size;
    int transfer_size;
    int nr_pages;
    struct list pages;
};

/*
 * Special order value for pages that aren't in a free list. Such pages are
 * either allocated, or part of a free block of pages but not the head page.
 */
#define VM_PAGE_ORDER_UNLISTED ((unsigned short)-1)

/*
 * Doubly-linked list of free blocks.
 */
struct vm_page_free_list {
    unsigned long size;
    struct list blocks;
};

/*
 * Zone name buffer size.
 */
#define VM_PAGE_NAME_SIZE 16

/*
 * Zone of contiguous memory.
 */
struct vm_page_zone {
    struct vm_page_cpu_pool cpu_pools[CONFIG_MAX_CPUS];
    phys_addr_t start;
    phys_addr_t end;
    struct vm_page *pages;
    struct vm_page *pages_end;
    struct mutex lock;
    struct vm_page_free_list free_lists[VM_PAGE_NR_FREE_LISTS];
    unsigned long nr_free_pages;
};

/*
 * Bootstrap information about a zone.
 */
struct vm_page_boot_zone {
    phys_addr_t start;
    phys_addr_t end;
    bool heap_present;
    phys_addr_t avail_start;
    phys_addr_t avail_end;
};

static int vm_page_is_ready __read_mostly;

/*
 * Zone table.
 *
 * The system supports a maximum of 4 zones :
 *  - DMA: suitable for DMA
 *  - DMA32: suitable for DMA when devices support 32-bit addressing
 *  - DIRECTMAP: direct physical mapping, allows direct access from
 *    the kernel with a simple offset translation
 *  - HIGHMEM: must be mapped before it can be accessed
 *
 * Zones are ordered by priority, 0 being the lowest priority. Their
 * relative priorities are DMA < DMA32 < DIRECTMAP < HIGHMEM. Some zones
 * may actually be aliases for others, e.g. if DMA is always possible from
 * the direct physical mapping, DMA and DMA32 are aliases for DIRECTMAP,
 * in which case the zone table contains DIRECTMAP and HIGHMEM only.
 */
static struct vm_page_zone vm_page_zones[PMEM_MAX_ZONES];

/*
 * Bootstrap zone table.
 */
static struct vm_page_boot_zone vm_page_boot_zones[PMEM_MAX_ZONES]
    __initdata;

/*
 * Number of loaded zones.
 */
static unsigned int vm_page_zones_size __read_mostly;

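/*
 * Initialize a page descriptor as a reserved, unlisted, unreferenced page
 * at the given physical address.
 */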
static void __init
vm_page_init(struct vm_page *page, unsigned short zone_index, phys_addr_t pa)
{
    memset(page, 0, sizeof(*page));
    page->type = VM_PAGE_RESERVED;
    page->zone_index = zone_index;
    page->order = VM_PAGE_ORDER_UNLISTED;
    page->phys_addr = pa;
    page->nr_refs = 0;
    page->object = NULL;
}

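/*
 * Set the type of all pages in a block of 2^order pages.
 */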
void
vm_page_set_type(struct vm_page *page, unsigned int order, unsigned short type)
{
    unsigned int i, nr_pages;

    nr_pages = 1 << order;

    for (i = 0; i < nr_pages; i++) {
        page[i].type = type;
    }
}

static void __init
vm_page_free_list_init(struct vm_page_free_list *free_list)
{
    free_list->size = 0;
    list_init(&free_list->blocks);
}

static inline void
vm_page_free_list_insert(struct vm_page_free_list *free_list,
                         struct vm_page *page)
{
    assert(page->order == VM_PAGE_ORDER_UNLISTED);

    free_list->size++;
    list_insert_head(&free_list->blocks, &page->node);
}

static inline void
vm_page_free_list_remove(struct vm_page_free_list *free_list,
                         struct vm_page *page)
{
    assert(page->order != VM_PAGE_ORDER_UNLISTED);

    free_list->size--;
    list_remove(&page->node);
}

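/*
 * Allocate a block of 2^order pages from the buddy system.
 *
 * Find the smallest non-empty free list of order greater than or equal to
 * the requested order, take its first block, and repeatedly split it,
 * returning the upper halves to the lower-order free lists, until a block
 * of the requested order remains.
 *
 * The zone lock must be held by the caller. Returns NULL if no block of
 * sufficient order is available.
 */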
static struct vm_page *
vm_page_zone_alloc_from_buddy(struct vm_page_zone *zone, unsigned int order)
{
    struct vm_page_free_list *free_list = NULL;
    struct vm_page *page, *buddy;
    unsigned int i;

    assert(order < VM_PAGE_NR_FREE_LISTS);

    for (i = order; i < VM_PAGE_NR_FREE_LISTS; i++) {
        free_list = &zone->free_lists[i];

        if (free_list->size != 0) {
            break;
        }
    }

    if (i == VM_PAGE_NR_FREE_LISTS) {
        return NULL;
    }

    page = list_first_entry(&free_list->blocks, struct vm_page, node);
    vm_page_free_list_remove(free_list, page);
    page->order = VM_PAGE_ORDER_UNLISTED;

    while (i > order) {
        i--;
        buddy = &page[1 << i];
        vm_page_free_list_insert(&zone->free_lists[i], buddy);
        buddy->order = i;
    }

    zone->nr_free_pages -= (1 << order);
    return page;
}

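/*
 * Release a block of 2^order pages to the buddy system.
 *
 * As long as the buddy of the freed block is itself the head of a free
 * block of the same order, remove that buddy from its free list and merge
 * the two blocks into one of the next higher order, then insert the
 * resulting block into the matching free list.
 *
 * Called with the zone lock held, except during bootstrap when the system
 * is still single-threaded.
 */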
static void
vm_page_zone_free_to_buddy(struct vm_page_zone *zone, struct vm_page *page,
                           unsigned int order)
{
    struct vm_page *buddy;
    phys_addr_t pa, buddy_pa;
    unsigned int nr_pages;

    assert(page >= zone->pages);
    assert(page < zone->pages_end);
    assert(page->order == VM_PAGE_ORDER_UNLISTED);
    assert(order < VM_PAGE_NR_FREE_LISTS);

    nr_pages = (1 << order);
    pa = page->phys_addr;

    while (order < (VM_PAGE_NR_FREE_LISTS - 1)) {
        buddy_pa = pa ^ vm_page_ptob(1 << order);

        if ((buddy_pa < zone->start) || (buddy_pa >= zone->end)) {
            break;
        }

        buddy = &zone->pages[vm_page_btop(buddy_pa - zone->start)];

        if (buddy->order != order) {
            break;
        }

        vm_page_free_list_remove(&zone->free_lists[order], buddy);
        buddy->order = VM_PAGE_ORDER_UNLISTED;
        order++;
        pa &= -vm_page_ptob(1 << order);
        page = &zone->pages[vm_page_btop(pa - zone->start)];
    }

    vm_page_free_list_insert(&zone->free_lists[order], page);
    page->order = order;
    zone->nr_free_pages += nr_pages;
}

static void __init
vm_page_cpu_pool_init(struct vm_page_cpu_pool *cpu_pool, int size)
{
    mutex_init(&cpu_pool->lock);
    cpu_pool->size = size;
    cpu_pool->transfer_size = (size + VM_PAGE_CPU_POOL_TRANSFER_RATIO - 1)
                              / VM_PAGE_CPU_POOL_TRANSFER_RATIO;
    cpu_pool->nr_pages = 0;
    list_init(&cpu_pool->pages);
}

static inline struct vm_page_cpu_pool *
vm_page_cpu_pool_get(struct vm_page_zone *zone)
{
    return &zone->cpu_pools[cpu_id()];
}

static inline struct vm_page *
vm_page_cpu_pool_pop(struct vm_page_cpu_pool *cpu_pool)
{
    struct vm_page *page;

    assert(cpu_pool->nr_pages != 0);
    cpu_pool->nr_pages--;
    page = list_first_entry(&cpu_pool->pages, struct vm_page, node);
    list_remove(&page->node);
    return page;
}

static inline void
vm_page_cpu_pool_push(struct vm_page_cpu_pool *cpu_pool, struct vm_page *page)
{
    assert(cpu_pool->nr_pages < cpu_pool->size);
    cpu_pool->nr_pages++;
    list_insert_head(&cpu_pool->pages, &page->node);
}

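/*
 * Fill an empty CPU pool by transferring up to transfer_size single pages
 * from the buddy system, and return the number of pages actually obtained.
 * vm_page_cpu_pool_drain() below performs the symmetric operation on a
 * full pool.
 *
 * The CPU pool lock must be held by the caller; the zone lock is taken here.
 */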
static int
vm_page_cpu_pool_fill(struct vm_page_cpu_pool *cpu_pool,
                      struct vm_page_zone *zone)
{
    struct vm_page *page;
    int i;

    assert(cpu_pool->nr_pages == 0);

    mutex_lock(&zone->lock);

    for (i = 0; i < cpu_pool->transfer_size; i++) {
        page = vm_page_zone_alloc_from_buddy(zone, 0);

        if (page == NULL) {
            break;
        }

        vm_page_cpu_pool_push(cpu_pool, page);
    }

    mutex_unlock(&zone->lock);

    return i;
}

static void
vm_page_cpu_pool_drain(struct vm_page_cpu_pool *cpu_pool,
                       struct vm_page_zone *zone)
{
    struct vm_page *page;
    int i;

    assert(cpu_pool->nr_pages == cpu_pool->size);

    mutex_lock(&zone->lock);

    for (i = cpu_pool->transfer_size; i > 0; i--) {
        page = vm_page_cpu_pool_pop(cpu_pool);
        vm_page_zone_free_to_buddy(zone, page, 0);
    }

    mutex_unlock(&zone->lock);
}

static phys_addr_t __init
vm_page_zone_size(struct vm_page_zone *zone)
{
    return zone->end - zone->start;
}

static int __init
vm_page_zone_compute_pool_size(struct vm_page_zone *zone)
{
    phys_addr_t size;

    size = vm_page_btop(vm_page_zone_size(zone)) / VM_PAGE_CPU_POOL_RATIO;

    if (size == 0) {
        size = 1;
    } else if (size > VM_PAGE_CPU_POOL_MAX_SIZE) {
        size = VM_PAGE_CPU_POOL_MAX_SIZE;
    }

    return size;
}

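/*
 * Initialize a zone: set its physical range, size its CPU pools, attach
 * its slice of the page table, reset its free lists, and initialize every
 * page descriptor as reserved and unlisted.
 */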
static void __init
vm_page_zone_init(struct vm_page_zone *zone, phys_addr_t start, phys_addr_t end,
                  struct vm_page *pages)
{
    phys_addr_t pa;
    int pool_size;
    unsigned int i;

    zone->start = start;
    zone->end = end;
    pool_size = vm_page_zone_compute_pool_size(zone);

    for (i = 0; i < ARRAY_SIZE(zone->cpu_pools); i++) {
        vm_page_cpu_pool_init(&zone->cpu_pools[i], pool_size);
    }

    zone->pages = pages;
    zone->pages_end = pages + vm_page_btop(vm_page_zone_size(zone));
    mutex_init(&zone->lock);

    for (i = 0; i < ARRAY_SIZE(zone->free_lists); i++) {
        vm_page_free_list_init(&zone->free_lists[i]);
    }

    zone->nr_free_pages = 0;
    i = zone - vm_page_zones;

    for (pa = zone->start; pa < zone->end; pa += PAGE_SIZE) {
        vm_page_init(&pages[vm_page_btop(pa - zone->start)], i, pa);
    }
}

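/*
 * Allocate a block of 2^order pages from a zone and set its type.
 *
 * Order-0 allocations are served from the per-CPU pool of the calling
 * processor (with the thread pinned while the pool is accessed), falling
 * back to the buddy system to refill an empty pool. Larger allocations go
 * straight to the buddy system under the zone lock.
 */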
static struct vm_page *
vm_page_zone_alloc(struct vm_page_zone *zone, unsigned int order,
                   unsigned short type)
{
    struct vm_page_cpu_pool *cpu_pool;
    struct vm_page *page;
    int filled;

    assert(order < VM_PAGE_NR_FREE_LISTS);

    if (order == 0) {
        thread_pin();
        cpu_pool = vm_page_cpu_pool_get(zone);
        mutex_lock(&cpu_pool->lock);

        if (cpu_pool->nr_pages == 0) {
            filled = vm_page_cpu_pool_fill(cpu_pool, zone);

            if (!filled) {
                mutex_unlock(&cpu_pool->lock);
                thread_unpin();
                return NULL;
            }
        }

        page = vm_page_cpu_pool_pop(cpu_pool);
        mutex_unlock(&cpu_pool->lock);
        thread_unpin();
    } else {
        mutex_lock(&zone->lock);
        page = vm_page_zone_alloc_from_buddy(zone, order);
        mutex_unlock(&zone->lock);

        if (page == NULL) {
            return NULL;
        }
    }

    assert(page->type == VM_PAGE_FREE);
    vm_page_set_type(page, order, type);
    return page;
}

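/*
 * Release a block of 2^order pages back to a zone.
 *
 * Order-0 pages are returned to the per-CPU pool, draining it to the buddy
 * system first if it is full. Larger blocks are returned directly to the
 * buddy system.
 */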
static void
vm_page_zone_free(struct vm_page_zone *zone, struct vm_page *page,
                  unsigned int order)
{
    struct vm_page_cpu_pool *cpu_pool;

    assert(page->type != VM_PAGE_FREE);
    assert(order < VM_PAGE_NR_FREE_LISTS);

    vm_page_set_type(page, order, VM_PAGE_FREE);

    if (order == 0) {
        thread_pin();
        cpu_pool = vm_page_cpu_pool_get(zone);
        mutex_lock(&cpu_pool->lock);

        if (cpu_pool->nr_pages == cpu_pool->size) {
            vm_page_cpu_pool_drain(cpu_pool, zone);
        }

        vm_page_cpu_pool_push(cpu_pool, page);
        mutex_unlock(&cpu_pool->lock);
        thread_unpin();
    } else {
        mutex_lock(&zone->lock);
        vm_page_zone_free_to_buddy(zone, page, order);
        mutex_unlock(&zone->lock);
    }
}

void __init
vm_page_load(unsigned int zone_index, phys_addr_t start, phys_addr_t end)
{
    struct vm_page_boot_zone *zone;

    assert(zone_index < ARRAY_SIZE(vm_page_boot_zones));
    assert(vm_page_aligned(start));
    assert(vm_page_aligned(end));
    assert(start < end);
    assert(vm_page_zones_size < ARRAY_SIZE(vm_page_boot_zones));

    zone = &vm_page_boot_zones[zone_index];
    zone->start = start;
    zone->end = end;
    zone->heap_present = false;

    log_debug("vm_page: load: %s: %llx:%llx",
              vm_page_zone_name(zone_index),
              (unsigned long long)start, (unsigned long long)end);

    vm_page_zones_size++;
}

void
vm_page_load_heap(unsigned int zone_index, phys_addr_t start, phys_addr_t end)
{
    struct vm_page_boot_zone *zone;

    assert(zone_index < ARRAY_SIZE(vm_page_boot_zones));
    assert(vm_page_aligned(start));
    assert(vm_page_aligned(end));

    zone = &vm_page_boot_zones[zone_index];

    assert(zone->start <= start);
    assert(end <= zone->end);

    zone->avail_start = start;
    zone->avail_end = end;
    zone->heap_present = true;

    log_debug("vm_page: heap: %s: %llx:%llx",
              vm_page_zone_name(zone_index),
              (unsigned long long)start, (unsigned long long)end);
}

int
vm_page_ready(void)
{
    return vm_page_is_ready;
}

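/*
 * Convert an allocation selector into the index of the highest-priority
 * zone it allows, clamped to the number of loaded zones so that aliased
 * selectors fall back to the best zone actually present.
 */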
static unsigned int
vm_page_select_alloc_zone(unsigned int selector)
{
    unsigned int zone_index;

    switch (selector) {
    case VM_PAGE_SEL_DMA:
        zone_index = PMEM_ZONE_DMA;
        break;
    case VM_PAGE_SEL_DMA32:
        zone_index = PMEM_ZONE_DMA32;
        break;
    case VM_PAGE_SEL_DIRECTMAP:
        zone_index = PMEM_ZONE_DIRECTMAP;
        break;
    case VM_PAGE_SEL_HIGHMEM:
        zone_index = PMEM_ZONE_HIGHMEM;
        break;
    default:
        panic("vm_page: invalid selector");
    }

    return MIN(vm_page_zones_size - 1, zone_index);
}

static int __init
vm_page_boot_zone_loaded(const struct vm_page_boot_zone *zone)
{
    return (zone->end != 0);
}

static void __init
vm_page_check_boot_zones(void)
{
    unsigned int i;
    int expect_loaded;

    if (vm_page_zones_size == 0) {
        panic("vm_page: no physical memory loaded");
    }

    for (i = 0; i < ARRAY_SIZE(vm_page_boot_zones); i++) {
        expect_loaded = (i < vm_page_zones_size);

        if (vm_page_boot_zone_loaded(&vm_page_boot_zones[i]) == expect_loaded) {
            continue;
        }

        panic("vm_page: invalid boot zone table");
    }
}

static phys_addr_t __init
vm_page_boot_zone_size(struct vm_page_boot_zone *zone)
{
    return zone->end - zone->start;
}

static phys_addr_t __init
vm_page_boot_zone_avail_size(struct vm_page_boot_zone *zone)
{
    return zone->avail_end - zone->avail_start;
}

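/*
 * Early allocator used before the page allocator is ready.
 *
 * Scan the boot zones from the direct-mapped zone downwards and carve the
 * requested size out of the first boot heap large enough to hold it, by
 * bumping its available start address. The memory is returned through the
 * direct physical mapping.
 */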
static void * __init
vm_page_bootalloc(size_t size)
{
    struct vm_page_boot_zone *zone;
    phys_addr_t pa;
    unsigned int i;

    for (i = vm_page_select_alloc_zone(VM_PAGE_SEL_DIRECTMAP);
         i < vm_page_zones_size;
         i--) {
        zone = &vm_page_boot_zones[i];

        if (!zone->heap_present) {
            continue;
        }

        if (size <= vm_page_boot_zone_avail_size(zone)) {
            pa = zone->avail_start;
            zone->avail_start += vm_page_round(size);
            return (void *)vm_page_direct_va(pa);
        }
    }

    panic("vm_page: no physical memory available");
}

static void
vm_page_info_common(int (*print_fn)(const char *format, ...))
{
    struct vm_page_zone *zone;
    unsigned long pages;
    unsigned int i;

    for (i = 0; i < vm_page_zones_size; i++) {
        zone = &vm_page_zones[i];
        pages = (unsigned long)(zone->pages_end - zone->pages);
        print_fn("vm_page: %s: pages: %lu (%luM), free: %lu (%luM)\n",
                 vm_page_zone_name(i), pages, pages >> (20 - PAGE_SHIFT),
                 zone->nr_free_pages, zone->nr_free_pages >> (20 - PAGE_SHIFT));
    }
}

#ifdef CONFIG_SHELL

static void
vm_page_info(void)
{
    vm_page_info_common(printf);
}

static void
vm_page_shell_info(struct shell *shell, int argc, char **argv)
{
    (void)shell;
    (void)argc;
    (void)argv;
    vm_page_info();
}

static struct shell_cmd vm_page_shell_cmds[] = {
    SHELL_CMD_INITIALIZER("vm_page_info", vm_page_shell_info,
                          "vm_page_info",
                          "display information about physical memory"),
};

static int __init
vm_page_setup_shell(void)
{
    SHELL_REGISTER_CMDS(vm_page_shell_cmds, shell_get_main_cmd_set());
    return 0;
}

INIT_OP_DEFINE(vm_page_setup_shell,
               INIT_OP_DEP(printf_setup, true),
               INIT_OP_DEP(shell_setup, true),
               INIT_OP_DEP(vm_page_setup, true));

#endif /* CONFIG_SHELL */

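/*
 * Set up the page allocator from the boot zone table:
 *  - compute the size of the global page table (one struct vm_page per
 *    physical page) and allocate it from the boot heap,
 *  - initialize each zone with its slice of the table, all pages reserved,
 *  - release the pages of each boot heap to populate the free lists,
 *  - mark the pages backing the page table itself as VM_PAGE_TABLE.
 */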
static int __init
vm_page_setup(void)
{
    struct vm_page_boot_zone *boot_zone;
    struct vm_page_zone *zone;
    struct vm_page *table, *page, *end;
    size_t nr_pages, table_size;
    uintptr_t va;
    unsigned int i;
    phys_addr_t pa;

    vm_page_check_boot_zones();

    /*
     * Compute the page table size.
     */
    nr_pages = 0;

    for (i = 0; i < vm_page_zones_size; i++) {
        nr_pages += vm_page_btop(vm_page_boot_zone_size(&vm_page_boot_zones[i]));
    }

    table_size = vm_page_round(nr_pages * sizeof(struct vm_page));
    log_info("vm_page: page table size: %zu entries (%zuk)", nr_pages,
             table_size >> 10);
    table = vm_page_bootalloc(table_size);
    va = (uintptr_t)table;

    /*
     * Initialize the zones, associating them with the page table. When
     * the zones are initialized, all their pages are marked as allocated.
     * The available pages are then released, which populates the free lists.
     */
    for (i = 0; i < vm_page_zones_size; i++) {
        zone = &vm_page_zones[i];
        boot_zone = &vm_page_boot_zones[i];
        vm_page_zone_init(zone, boot_zone->start, boot_zone->end, table);
        page = zone->pages + vm_page_btop(boot_zone->avail_start
                                          - boot_zone->start);
        end = zone->pages + vm_page_btop(boot_zone->avail_end
                                         - boot_zone->start);

        while (page < end) {
            page->type = VM_PAGE_FREE;
            vm_page_zone_free_to_buddy(zone, page, 0);
            page++;
        }

        table += vm_page_btop(vm_page_zone_size(zone));
    }

    while (va < (uintptr_t)table) {
        pa = vm_page_direct_pa(va);
        page = vm_page_lookup(pa);
        assert((page != NULL) && (page->type == VM_PAGE_RESERVED));
        page->type = VM_PAGE_TABLE;
        va += PAGE_SIZE;
    }

    vm_page_is_ready = 1;
    return 0;
}

INIT_OP_DEFINE(vm_page_setup,
               INIT_OP_DEP(boot_load_vm_page_zones, true),
               INIT_OP_DEP(log_setup, true),
               INIT_OP_DEP(printf_setup, true));

/* TODO Rename to avoid confusion with "managed pages" */
void __init
vm_page_manage(struct vm_page *page)
{
    assert(page->zone_index < ARRAY_SIZE(vm_page_zones));
    assert(page->type == VM_PAGE_RESERVED);

    vm_page_set_type(page, 0, VM_PAGE_FREE);
    vm_page_zone_free_to_buddy(&vm_page_zones[page->zone_index], page, 0);
}

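/*
 * Return the page descriptor for a physical address, or NULL if the
 * address doesn't belong to any loaded zone.
 */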
struct vm_page *
vm_page_lookup(phys_addr_t pa)
{
    struct vm_page_zone *zone;
    unsigned int i;

    for (i = 0; i < vm_page_zones_size; i++) {
        zone = &vm_page_zones[i];

        if ((pa >= zone->start) && (pa < zone->end)) {
            return &zone->pages[vm_page_btop(pa - zone->start)];
        }
    }

    return NULL;
}

static bool
vm_page_block_referenced(const struct vm_page *page, unsigned int order)
{
    unsigned int i, nr_pages;

    nr_pages = 1 << order;

    for (i = 0; i < nr_pages; i++) {
        if (vm_page_referenced(&page[i])) {
            return true;
        }
    }

    return false;
}

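/*
 * Allocate a block of 2^order pages, trying the zone selected by the
 * given selector first and falling back to lower-priority zones on
 * failure. Returns NULL if no zone can satisfy the request.
 */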
struct vm_page *
vm_page_alloc(unsigned int order, unsigned int selector, unsigned short type)
{
    struct vm_page *page;
    unsigned int i;

    for (i = vm_page_select_alloc_zone(selector); i < vm_page_zones_size; i--) {
        page = vm_page_zone_alloc(&vm_page_zones[i], order, type);

        if (page != NULL) {
            assert(!vm_page_block_referenced(page, order));
            return page;
        }
    }

    return NULL;
}

void
vm_page_free(struct vm_page *page, unsigned int order)
{
    assert(page->zone_index < ARRAY_SIZE(vm_page_zones));
    assert(!vm_page_block_referenced(page, order));

    vm_page_zone_free(&vm_page_zones[page->zone_index], page, order);
}

const char *
vm_page_zone_name(unsigned int zone_index)
{
    /* Don't use a switch statement since zones can be aliased */
    if (zone_index == PMEM_ZONE_HIGHMEM) {
        return "HIGHMEM";
    } else if (zone_index == PMEM_ZONE_DIRECTMAP) {
        return "DIRECTMAP";
    } else if (zone_index == PMEM_ZONE_DMA32) {
        return "DMA32";
    } else if (zone_index == PMEM_ZONE_DMA) {
        return "DMA";
    } else {
        panic("vm_page: invalid zone index");
    }
}

void
vm_page_log_info(void)
{
    vm_page_info_common(log_info);
}