vm_map.c

/*
 * Copyright (c) 2011-2017 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * XXX This module is far from complete. It just provides the basic support
 * needed for kernel allocation.
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#include <kern/init.h>
#include <kern/kmem.h>
#include <kern/list.h>
#include <kern/macros.h>
#include <kern/mutex.h>
#include <kern/panic.h>
#include <kern/rbtree.h>
#include <kern/shell.h>
#include <kern/task.h>
#include <machine/page.h>
#include <machine/pmap.h>
#include <vm/vm_adv.h>
#include <vm/vm_inherit.h>
#include <vm/vm_map.h>
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>
#include <vm/vm_prot.h>

/*
 * Special threshold which disables the use of the free area cache address.
 */
#define VM_MAP_NO_FIND_CACHE (~(size_t)0)

/*
 * Mapping request.
 *
 * Most members are input parameters from a call to e.g. vm_map_enter(). The
 * start member is also an output argument. The next member is used internally
 * by the mapping functions.
 */
struct vm_map_request {
    uintptr_t start;
    size_t size;
    size_t align;
    int flags;
    struct vm_object *object;
    uint64_t offset;
    struct vm_map_entry *next;
};

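/*
 * Illustrative example (added comment, not from the original source): a
 * two-page mapping with no fixed address and default alignment reaches
 * vm_map_prepare() as roughly the following request, with start updated by
 * vm_map_find_avail() once a suitable hole is found:
 *
 *   struct vm_map_request request = {
 *       .start  = 0,                (no hint, let the map pick an address)
 *       .size   = 2 * PAGE_SIZE,
 *       .align  = 0,                (page alignment is implied)
 *       .flags  = <protection/inheritance/advice bits packed by the caller>,
 *       .object = NULL,             (special kernel mapping)
 *       .offset = 0,
 *   };
 */
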
static int vm_map_prepare(struct vm_map *map, uintptr_t start,
                          size_t size, size_t align, int flags,
                          struct vm_object *object, uint64_t offset,
                          struct vm_map_request *request);

static int vm_map_insert(struct vm_map *map, struct vm_map_entry *entry,
                         const struct vm_map_request *request);

static struct kmem_cache vm_map_entry_cache;

static struct kmem_cache vm_map_cache;

struct vm_map vm_map_kernel_map;

static struct vm_map_entry *
vm_map_entry_create(void)
{
    struct vm_map_entry *entry;

    entry = kmem_cache_alloc(&vm_map_entry_cache);

    /* TODO Handle error */
    if (entry == NULL) {
        panic("vm_map: can't create map entry");
    }

    return entry;
}

static void
vm_map_entry_destroy(struct vm_map_entry *entry)
{
    kmem_cache_free(&vm_map_entry_cache, entry);
}

static inline int
vm_map_entry_cmp_lookup(uintptr_t addr, const struct rbtree_node *node)
{
    struct vm_map_entry *entry;

    entry = rbtree_entry(node, struct vm_map_entry, tree_node);

    if (addr >= entry->end) {
        return 1;
    }

    if (addr >= entry->start) {
        return 0;
    }

    return -1;
}

static inline int
vm_map_entry_cmp_insert(const struct rbtree_node *a,
                        const struct rbtree_node *b)
{
    struct vm_map_entry *entry;

    entry = rbtree_entry(a, struct vm_map_entry, tree_node);
    return vm_map_entry_cmp_lookup(entry->start, b);
}

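/*
 * Note (added comment): the lookup comparator considers an address "equal"
 * to an entry when it falls anywhere inside [start, end), so a tree lookup
 * for an address covered by an entry returns that entry. For example, with
 * an entry spanning [0x1000, 0x3000), 0x2000 compares equal, 0x3000 compares
 * greater and 0x800 compares less. The insert comparator reuses this by
 * comparing the new entry's start address, which is valid because entries
 * never overlap.
 */
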
static bool
vm_map_request_valid(const struct vm_map_request *request)
{
    return ((request->object != NULL) || (request->offset == 0))
           && (vm_page_aligned(request->offset))
           && (vm_page_aligned(request->start))
           && (request->size > 0)
           && (vm_page_aligned(request->size))
           && ((request->start + request->size) > request->start)
           && ((request->align == 0) || (request->align >= PAGE_SIZE))
           && (ISP2(request->align))
           && ((VM_MAP_PROT(request->flags) & VM_MAP_MAXPROT(request->flags))
               == VM_MAP_PROT(request->flags))
           && (!(request->flags & VM_MAP_FIXED)
               || (request->align == 0)
               || P2ALIGNED(request->start, request->align));
}

/*
 * Look up an entry in a map.
 *
 * This function returns the entry which is closest to the given address
 * such that addr < entry->end (i.e. either containing or after the requested
 * address), or NULL if there is no such entry.
 */
static struct vm_map_entry *
vm_map_lookup_nearest(struct vm_map *map, uintptr_t addr)
{
    struct vm_map_entry *entry;
    struct rbtree_node *node;

    assert(vm_page_aligned(addr));

    entry = map->lookup_cache;

    if ((entry != NULL) && (addr >= entry->start) && (addr < entry->end)) {
        return entry;
    }

    node = rbtree_lookup_nearest(&map->entry_tree, addr,
                                 vm_map_entry_cmp_lookup, RBTREE_RIGHT);

    if (node != NULL) {
        entry = rbtree_entry(node, struct vm_map_entry, tree_node);
        assert(addr < entry->end);
        map->lookup_cache = entry;
        return entry;
    }

    return NULL;
}

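/*
 * Example (added comment): with entries covering [0x1000, 0x2000) and
 * [0x5000, 0x6000), looking up 0x1800 returns the first entry (it contains
 * the address), looking up 0x3000 returns the second entry (the nearest one
 * ending above the address), and looking up 0x7000 returns NULL. Lookups
 * that go through the tree also refresh the per-map lookup cache.
 */
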
static void
vm_map_reset_find_cache(struct vm_map *map)
{
    map->find_cache = 0;
    map->find_cache_threshold = VM_MAP_NO_FIND_CACHE;
}

static int
vm_map_find_fixed(struct vm_map *map, struct vm_map_request *request)
{
    struct vm_map_entry *next;
    uintptr_t start;
    size_t size;

    start = request->start;
    size = request->size;

    if ((start < map->start) || (start + size) > map->end) {
        return ENOMEM;
    }

    next = vm_map_lookup_nearest(map, start);

    if (next == NULL) {
        if ((map->end - start) < size) {
            return ENOMEM;
        }

        request->next = NULL;
        return 0;
    }

    if ((start >= next->start) || ((next->start - start) < size)) {
        return ENOMEM;
    }

    request->next = next;
    return 0;
}

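/*
 * Note (added comment): a fixed request succeeds only if [start, start + size)
 * lies entirely inside the map and ends at or before the start of the nearest
 * existing entry, i.e. it must not overlap any mapping. On success,
 * request->next is the entry that will follow the new mapping, or NULL if the
 * mapping becomes the last one.
 */
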
static int
vm_map_find_avail(struct vm_map *map, struct vm_map_request *request)
{
    struct vm_map_entry *next;
    struct list *node;
    uintptr_t base, start;
    size_t size, align, space;
    int error;

    /* If there is a hint, try there */
    if (request->start != 0) {
        error = vm_map_find_fixed(map, request);

        if (!error) {
            return 0;
        }
    }

    size = request->size;
    align = request->align;

    if (size > map->find_cache_threshold) {
        base = map->find_cache;
    } else {
        base = map->start;

        /*
         * Searching from the map start means an area whose size is the
         * threshold (or smaller) may be selected, making the threshold
         * invalid. Reset it.
         */
        map->find_cache_threshold = 0;
    }

retry:
    start = base;
    next = vm_map_lookup_nearest(map, start);

    for (;;) {
        assert(start <= map->end);

        if (align != 0) {
            start = P2ROUND(start, align);
        }

        /*
         * The end of the map has been reached, and no space could be found.
         * If the search didn't start at map->start, retry from there in case
         * space is available below the previous start address.
         */
        if ((map->end - start) < size) {
            if (base != map->start) {
                base = map->start;
                map->find_cache_threshold = 0;
                goto retry;
            }

            return ENOMEM;
        }

        if (next == NULL) {
            space = map->end - start;
        } else if (start >= next->start) {
            space = 0;
        } else {
            space = next->start - start;
        }

        if (space >= size) {
            map->find_cache = start + size;
            request->start = start;
            request->next = next;
            return 0;
        }

        if (space > map->find_cache_threshold) {
            map->find_cache_threshold = space;
        }

        start = next->end;
        node = list_next(&next->list_node);

        if (list_end(&map->entry_list, node)) {
            next = NULL;
        } else {
            next = list_entry(node, struct vm_map_entry, list_node);
        }
    }
}

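/*
 * Note (added comment): this is a first-fit walk over the sorted entry list.
 * find_cache remembers the address right after the most recent allocation so
 * later searches can skip the densely mapped beginning of the map, and
 * find_cache_threshold tracks the largest hole known below that address:
 * requests no larger than the threshold restart from map->start, while larger
 * requests begin directly at the cached address.
 */
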
static inline struct vm_map_entry *
vm_map_next(struct vm_map *map, struct vm_map_entry *entry)
{
    struct list *node;

    node = list_next(&entry->list_node);

    if (list_end(&map->entry_list, node)) {
        return NULL;
    } else {
        return list_entry(node, struct vm_map_entry, list_node);
    }
}

static void
vm_map_link(struct vm_map *map, struct vm_map_entry *entry,
            struct vm_map_entry *next)
{
    assert(entry->start < entry->end);

    if (next == NULL) {
        list_insert_tail(&map->entry_list, &entry->list_node);
    } else {
        list_insert_before(&entry->list_node, &next->list_node);
    }

    rbtree_insert(&map->entry_tree, &entry->tree_node, vm_map_entry_cmp_insert);
    map->nr_entries++;
}

static void
vm_map_unlink(struct vm_map *map, struct vm_map_entry *entry)
{
    assert(entry->start < entry->end);

    if (map->lookup_cache == entry) {
        map->lookup_cache = NULL;
    }

    list_remove(&entry->list_node);
    rbtree_remove(&map->entry_tree, &entry->tree_node);
    map->nr_entries--;
}

/*
 * Check mapping parameters, find a suitable area of virtual memory, and
 * prepare the mapping request for that region.
 */
static int
vm_map_prepare(struct vm_map *map, uintptr_t start,
               size_t size, size_t align, int flags,
               struct vm_object *object, uint64_t offset,
               struct vm_map_request *request)
{
    int error;

    request->start = start;
    request->size = size;
    request->align = align;
    request->flags = flags;
    request->object = object;
    request->offset = offset;
    assert(vm_map_request_valid(request));

    if (flags & VM_MAP_FIXED) {
        error = vm_map_find_fixed(map, request);
    } else {
        error = vm_map_find_avail(map, request);
    }

    return error;
}

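/*
 * Note (added comment): vm_map_prepare() and vm_map_insert() are the two
 * halves of vm_map_enter(): the former validates the parameters and selects
 * an address, the latter materializes the entry, merging with neighbours
 * when possible. Both run under the map lock taken by the caller.
 */
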
/*
 * Merging functions.
 *
 * There is room for optimization (e.g. not reinserting entries when it is
 * known the tree doesn't need to be adjusted), but focus on correctness for
 * now.
 */
static inline int
vm_map_try_merge_compatible(const struct vm_map_request *request,
                            const struct vm_map_entry *entry)
{
    /* Only merge special kernel mappings for now */
    return (request->object == NULL)
           && (entry->object == NULL)
           && ((request->flags & VM_MAP_ENTRY_MASK)
               == (entry->flags & VM_MAP_ENTRY_MASK));
}

static struct vm_map_entry *
vm_map_try_merge_prev(struct vm_map *map, const struct vm_map_request *request,
                      struct vm_map_entry *entry)
{
    struct vm_map_entry *next;

    assert(entry != NULL);

    if (!vm_map_try_merge_compatible(request, entry)) {
        return NULL;
    }

    if (entry->end != request->start) {
        return NULL;
    }

    next = vm_map_next(map, entry);
    vm_map_unlink(map, entry);
    entry->end += request->size;
    vm_map_link(map, entry, next);
    return entry;
}

static struct vm_map_entry *
vm_map_try_merge_next(struct vm_map *map, const struct vm_map_request *request,
                      struct vm_map_entry *entry)
{
    struct vm_map_entry *next;
    uintptr_t end;

    assert(entry != NULL);

    if (!vm_map_try_merge_compatible(request, entry)) {
        return NULL;
    }

    end = request->start + request->size;

    if (end != entry->start) {
        return NULL;
    }

    next = vm_map_next(map, entry);
    vm_map_unlink(map, entry);
    entry->start = request->start;
    vm_map_link(map, entry, next);
    return entry;
}

static struct vm_map_entry *
vm_map_try_merge_near(struct vm_map *map, const struct vm_map_request *request,
                      struct vm_map_entry *first, struct vm_map_entry *second)
{
    struct vm_map_entry *entry;

    assert(first != NULL);
    assert(second != NULL);

    if ((first->end == request->start)
        && ((request->start + request->size) == second->start)
        && vm_map_try_merge_compatible(request, first)
        && vm_map_try_merge_compatible(request, second)) {
        struct vm_map_entry *next;

        next = vm_map_next(map, second);
        vm_map_unlink(map, first);
        vm_map_unlink(map, second);
        first->end = second->end;
        vm_map_entry_destroy(second);
        vm_map_link(map, first, next);
        return first;
    }

    entry = vm_map_try_merge_prev(map, request, first);

    if (entry != NULL) {
        return entry;
    }

    return vm_map_try_merge_next(map, request, second);
}

static struct vm_map_entry *
vm_map_try_merge(struct vm_map *map, const struct vm_map_request *request)
{
    struct vm_map_entry *entry, *prev;
    struct list *node;

    /* Statically allocated map entries must not be merged */
    assert(!(request->flags & VM_MAP_NOMERGE));

    if (request->next == NULL) {
        node = list_last(&map->entry_list);

        if (list_end(&map->entry_list, node)) {
            entry = NULL;
        } else {
            prev = list_entry(node, struct vm_map_entry, list_node);
            entry = vm_map_try_merge_prev(map, request, prev);
        }
    } else {
        node = list_prev(&request->next->list_node);

        if (list_end(&map->entry_list, node)) {
            entry = vm_map_try_merge_next(map, request, request->next);
        } else {
            prev = list_entry(node, struct vm_map_entry, list_node);
            entry = vm_map_try_merge_near(map, request, prev, request->next);
        }
    }

    return entry;
}

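/*
 * Example (added comment): if two compatible object-less mappings cover
 * [0x1000, 0x2000) and [0x3000, 0x4000), a request for [0x2000, 0x3000) with
 * the same entry flags is merged with both neighbours, yielding a single
 * entry covering [0x1000, 0x4000) and destroying one of the existing entries
 * instead of allocating a new one.
 */
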
/*
 * Convert a prepared mapping request into an entry in the given map.
 *
 * If entry is NULL, a map entry is allocated for the mapping.
 */
static int
vm_map_insert(struct vm_map *map, struct vm_map_entry *entry,
              const struct vm_map_request *request)
{
    if (entry == NULL) {
        entry = vm_map_try_merge(map, request);

        if (entry != NULL) {
            goto out;
        }

        entry = vm_map_entry_create();
    }

    entry->start = request->start;
    entry->end = request->start + request->size;
    entry->object = request->object;
    entry->offset = request->offset;
    entry->flags = request->flags & VM_MAP_ENTRY_MASK;
    vm_map_link(map, entry, request->next);

out:
    map->size += request->size;
    return 0;
}

int
vm_map_enter(struct vm_map *map, uintptr_t *startp,
             size_t size, size_t align, int flags,
             struct vm_object *object, uint64_t offset)
{
    struct vm_map_request request;
    int error;

    mutex_lock(&map->lock);

    error = vm_map_prepare(map, *startp, size, align, flags, object, offset,
                           &request);

    if (error) {
        goto error_enter;
    }

    error = vm_map_insert(map, NULL, &request);

    if (error) {
        goto error_enter;
    }

    mutex_unlock(&map->lock);

    *startp = request.start;
    return 0;

error_enter:
    vm_map_reset_find_cache(map);
    mutex_unlock(&map->lock);
    return error;
}

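/*
 * Usage sketch (added comment, hedged): a caller such as the kernel virtual
 * memory allocator reserves a page-aligned range of kernel virtual space
 * roughly as follows, with flags built from the protection/inheritance/advice
 * constants provided by the included vm headers:
 *
 *   uintptr_t va = 0;
 *   int error;
 *
 *   error = vm_map_enter(vm_map_get_kernel_map(), &va, PAGE_SIZE, 0,
 *                        flags, NULL, 0);
 *
 * On success, va holds the start of the reserved range; on failure, the map
 * is left unchanged except for the reset find cache.
 */
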
static void
vm_map_split_entries(struct vm_map_entry *prev, struct vm_map_entry *next,
                     uintptr_t split_addr)
{
    uintptr_t delta;

    delta = split_addr - prev->start;
    prev->end = split_addr;
    next->start = split_addr;

    if (next->object != NULL) {
        next->offset += delta;
    }
}

static void
vm_map_clip_start(struct vm_map *map, struct vm_map_entry *entry,
                  uintptr_t start)
{
    struct vm_map_entry *new_entry, *next;

    if ((start <= entry->start) || (start >= entry->end)) {
        return;
    }

    next = vm_map_next(map, entry);
    vm_map_unlink(map, entry);
    new_entry = vm_map_entry_create();
    *new_entry = *entry;
    vm_map_split_entries(new_entry, entry, start);
    vm_map_link(map, entry, next);
    vm_map_link(map, new_entry, entry);
}

static void
vm_map_clip_end(struct vm_map *map, struct vm_map_entry *entry, uintptr_t end)
{
    struct vm_map_entry *new_entry, *next;

    if ((end <= entry->start) || (end >= entry->end)) {
        return;
    }

    next = vm_map_next(map, entry);
    vm_map_unlink(map, entry);
    new_entry = vm_map_entry_create();
    *new_entry = *entry;
    vm_map_split_entries(entry, new_entry, end);
    vm_map_link(map, entry, next);
    vm_map_link(map, new_entry, next);
}

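/*
 * Example (added comment): removing [0x2000, 0x3000) from an entry covering
 * [0x1000, 0x4000) first clips it at 0x2000, leaving [0x1000, 0x2000) and
 * [0x2000, 0x4000), then clips the second piece at 0x3000, so that exactly
 * [0x2000, 0x3000) is unlinked and destroyed by vm_map_remove(). Entries
 * backed by an object have their offset adjusted so the remaining pieces
 * keep referring to the same data.
 */
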
void
vm_map_remove(struct vm_map *map, uintptr_t start, uintptr_t end)
{
    struct vm_map_entry *entry;
    struct list *node;

    assert(start >= map->start);
    assert(end <= map->end);
    assert(start < end);

    mutex_lock(&map->lock);

    entry = vm_map_lookup_nearest(map, start);

    if (entry == NULL) {
        goto out;
    }

    vm_map_clip_start(map, entry, start);

    while (entry->start < end) {
        vm_map_clip_end(map, entry, end);
        map->size -= entry->end - entry->start;
        node = list_next(&entry->list_node);
        vm_map_unlink(map, entry);

        /* TODO Defer destruction to shorten critical section */
        vm_map_entry_destroy(entry);

        if (list_end(&map->entry_list, node)) {
            break;
        }

        entry = list_entry(node, struct vm_map_entry, list_node);
    }

    vm_map_reset_find_cache(map);

out:
    mutex_unlock(&map->lock);
}

static void
vm_map_init(struct vm_map *map, struct pmap *pmap,
            uintptr_t start, uintptr_t end)
{
    assert(vm_page_aligned(start));
    assert(vm_page_aligned(end));
    assert(start < end);

    mutex_init(&map->lock);
    list_init(&map->entry_list);
    rbtree_init(&map->entry_tree);
    map->nr_entries = 0;
    map->start = start;
    map->end = end;
    map->size = 0;
    map->lookup_cache = NULL;
    vm_map_reset_find_cache(map);
    map->pmap = pmap;
}

#ifdef CONFIG_SHELL

static void
vm_map_shell_info(struct shell *shell, int argc, char **argv)
{
    const struct task *task;

    (void)shell;

    if (argc < 2) {
        goto error;
    } else {
        task = task_lookup(argv[1]);

        if (task == NULL) {
            goto error;
        }

        vm_map_info(task_get_vm_map(task));
    }

    return;

error:
    printf("vm_map: info: invalid arguments\n");
}

static struct shell_cmd vm_map_shell_cmds[] = {
    SHELL_CMD_INITIALIZER("vm_map_info", vm_map_shell_info,
                          "vm_map_info <task_name>",
                          "display information about a VM map"),
};

static int __init
vm_map_setup_shell(void)
{
    SHELL_REGISTER_CMDS(vm_map_shell_cmds, shell_get_main_cmd_set());
    return 0;
}

INIT_OP_DEFINE(vm_map_setup_shell,
               INIT_OP_DEP(mutex_setup, true),
               INIT_OP_DEP(printf_setup, true),
               INIT_OP_DEP(shell_setup, true),
               INIT_OP_DEP(task_setup, true),
               INIT_OP_DEP(vm_map_setup, true));

#endif /* CONFIG_SHELL */

static int __init
vm_map_bootstrap(void)
{
    vm_map_init(vm_map_get_kernel_map(), pmap_get_kernel_pmap(),
                PMAP_START_KMEM_ADDRESS, PMAP_END_KMEM_ADDRESS);

    kmem_cache_init(&vm_map_entry_cache, "vm_map_entry",
                    sizeof(struct vm_map_entry), 0, NULL,
                    KMEM_CACHE_PAGE_ONLY);

    return 0;
}

INIT_OP_DEFINE(vm_map_bootstrap,
               INIT_OP_DEP(kmem_bootstrap, true),
               INIT_OP_DEP(thread_bootstrap, true));

static int __init
vm_map_setup(void)
{
    kmem_cache_init(&vm_map_cache, "vm_map", sizeof(struct vm_map),
                    0, NULL, KMEM_CACHE_PAGE_ONLY);
    return 0;
}

INIT_OP_DEFINE(vm_map_setup,
               INIT_OP_DEP(pmap_setup, true),
               INIT_OP_DEP(printf_setup, true),
               INIT_OP_DEP(vm_map_bootstrap, true));

int
vm_map_create(struct vm_map **mapp)
{
    struct vm_map *map;
    struct pmap *pmap;
    int error;

    map = kmem_cache_alloc(&vm_map_cache);

    if (map == NULL) {
        error = ENOMEM;
        goto error_map;
    }

    error = pmap_create(&pmap);

    if (error) {
        goto error_pmap;
    }

    vm_map_init(map, pmap, PMAP_START_ADDRESS, PMAP_END_ADDRESS);
    *mapp = map;
    return 0;

error_pmap:
    kmem_cache_free(&vm_map_cache, map);
error_map:
    return error;
}

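/*
 * Usage sketch (added comment, hedged): creating the address space for a new
 * task boils down to
 *
 *   struct vm_map *map;
 *   int error;
 *
 *   error = vm_map_create(&map);
 *
 * followed by vm_map_enter() calls on the resulting map. On failure, any
 * partially allocated resources are released before the error is returned.
 */
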
void
vm_map_info(struct vm_map *map)
{
    struct vm_map_entry *entry;
    const char *type, *name;

    if (map == vm_map_get_kernel_map()) {
        name = "kernel map";
    } else {
        name = "map";
    }

    mutex_lock(&map->lock);

    printf("vm_map: %s: %016lx-%016lx\n"
           "vm_map: start end "
           "size offset flags type\n", name,
           (unsigned long)map->start, (unsigned long)map->end);

    list_for_each_entry(&map->entry_list, entry, list_node) {
        if (entry->object == NULL) {
            type = "null";
        } else {
            type = "object";
        }

        printf("vm_map: %016lx %016lx %8luk %08llx %08x %s\n",
               (unsigned long)entry->start, (unsigned long)entry->end,
               (unsigned long)(entry->end - entry->start) >> 10,
               (unsigned long long)entry->offset, entry->flags, type);
    }

    printf("vm_map: total: %zuk\n", map->size >> 10);
    mutex_unlock(&map->lock);
}