test_vm_map_phys.c 8.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283
  1. /*
  2. * Copyright (c) 2024 Agustina Arzille.
  3. *
  4. * This program is free software: you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation, either version 3 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  16. *
  17. * This test module tests the physical mapping API for VM maps.
  18. */
  19. #include <stdio.h>
  20. #include <kern/capability.h>
  21. #include <kern/kmessage.h>
  22. #include <kern/task.h>
  23. #include <test/test.h>
  24. #include <vm/map.h>
  25. #include <vm/page.h>
// File offset at which the external pager object is mapped below.
#define TEST_VM_MAP_PHYS_OFFSET   (PAGE_SIZE * 14)

/*
 * Use a global to accumulate the returned pages since we need this
 * address to reside in userspace, and the stack for the landing
 * pad is in kernel space.
 */
static void* test_vm_map_phys_room;
/*
 * Landing-pad entry point for the capability flow created in
 * 'test_vm_map_phys_cap'. It services the kernel-generated messages of
 * the external pager protocol: an mmap request followed by a page request.
 *
 * NOTE(review): the replies issued via 'cap_reply_*' appear not to return
 * to this function on success (the final 'panic' below documents that
 * expectation) — confirm against the capability IPC implementation.
 */
#if !defined (__LP64__) && defined (__i386__)
__attribute__ ((regparm (2)))
#endif
static void
test_vm_map_phys_entry (struct ipc_msg *msg, struct ipc_msg_data *data)
{
  // Only the kernel itself may talk to this pager; bounce anything else.
  if (!(data->flags & IPC_MSG_KERNEL))
    cap_reply_bytes (0, 0, -EINVAL);

  _Auto kmsg = (struct kmessage *)msg->iovs->iov_base;
  if (kmsg->type == KMSG_TYPE_MMAP_REQ)
    {
      // The mapping request must carry read access and the offset used
      // by the test mapping below.
      test_assert_ne (kmsg->mmap_req.prot & VM_PROT_READ, 0);
      test_assert_eq (kmsg->mmap_req.offset, TEST_VM_MAP_PHYS_OFFSET);
      // A page reply to an mmap request must be rejected.
      ssize_t rv = cap_reply_pagereq (0, 0);
      test_assert_eq (rv, -EINVAL);
      cap_reply_bytes (0, 0, 0);
    }
  else if (kmsg->type != KMSG_TYPE_PAGE_REQ)
    cap_reply_bytes (0, 0, -EINVAL);

  // Page request: back the requested range with fresh physical pages.
  int flags = VM_MAP_FLAGS (VM_PROT_RDWR, VM_PROT_RDWR,
                            VM_INHERIT_SHARE, VM_ADV_DEFAULT,
                            VM_MAP_PHYS | VM_MAP_ANON);
  uintptr_t addr = 0, psz = kmsg->page_req.end - kmsg->page_req.start;
  int error = vm_map_enter (vm_map_self (), &addr, psz, flags, 0, 0);
  test_assert_zero (error);

  /*
   * Tag the first byte of each page with a recognizable value
   * (20, 40, 60, ...), and hand the pages back in REVERSE order, so the
   * consumer in 'test_vm_map_phys_cap' sees 60 - 20*i at page i.
   * 'buf' lives in the userspace scratch area (see the global above).
   */
  uintptr_t npg = psz / PAGE_SIZE, last_va = addr + psz,
            *buf = test_vm_map_phys_room;
  for (uintptr_t i = 0; i < npg; ++i, last_va -= PAGE_SIZE)
    {
      *(unsigned char *)addr = (unsigned char)((i + 1) * 20);
      buf[i] = last_va - PAGE_SIZE;
      addr += PAGE_SIZE;
    }

  // A plain byte reply to a page request must be rejected...
  ssize_t rv = cap_reply_bytes (0, 0, 0);
  test_assert_eq (rv, -EINVAL);
  // ...whereas the page reply completes the request and does not return.
  cap_reply_pagereq (buf, npg);
  panic ("shouldn't return");
}
/*
 * Exercise the dirty-page interfaces of an external VM object: map the
 * dirty pages, clean/redirty one of them, list and copy them out, and
 * finally verify that all pages end up clean.
 *
 * 'obj' is the pager-backed object; 'va' is the base of the caller's
 * 3-page mapping of it, whose first bytes were all set to 0xff.
 */
static void
test_vm_map_phys_handle_dirty (struct vm_object *obj, uintptr_t va)
{
  // Scratch area: control structures in the first page, three copy
  // destinations in the following pages.
  void *ptr;
  int ret = vm_map_anon_alloc (&ptr, vm_map_self (), PAGE_SIZE * 4);
  test_assert_zero (ret);

  struct
    {
      uint64_t offsets[16];
      struct iovec iovs[3];
      struct cap_page_info pginfo;
    } *p = ptr;

  p->pginfo.size = sizeof (p->pginfo);
  p->pginfo.flags = 0;
  p->pginfo.offset_cnt = ARRAY_SIZE (p->offsets);
  p->pginfo.offsets = p->offsets;
  p->pginfo.iovs = NULL;
  p->pginfo.iov_cnt = 0;

  // The caller dirtied 3 pages (first byte of each set to 0xff).
  ret = vm_object_map_dirty (obj, &p->pginfo);
  test_assert_eq (ret, 3);
  test_assert_eq (*(unsigned char *)p->pginfo.vme.addr, 0xff);

  // This call will clean the page, before marking it read-only.
  vm_map_remove (vm_map_self (), p->pginfo.vme.addr,
                 p->pginfo.vme.addr + PAGE_SIZE);
  // This mutation will mark it writable and dirty.
  *(unsigned char *)va = 0xfe;

  // All 3 pages must still be reported dirty.
  ret = vm_object_list_dirty (obj, &p->pginfo);
  test_assert_eq (ret, 3);

  // Destination buffers for the page copies (pages 1..3 of the scratch).
  for (int i = 0; i < 3; ++i)
    {
      p->iovs[i].iov_base = (char *)ptr + PAGE_SIZE * (i + 1);
      p->iovs[i].iov_len = PAGE_SIZE;
    }

  /*
   * Request 4 pages while only 3 are resident: the 4th offset
   * (PAGE_SIZE) is expected to fail, which 'vm_object_copy_pages'
   * presumably signals by setting the offset's low bit — the assertion
   * on offsets[3] below checks exactly that. NOTE(review): iov_cnt is 4
   * with only 3 iovecs; this looks intentional (the failed offset
   * consumes no iovec), but confirm against the implementation.
   */
  p->pginfo.iovs = p->iovs;
  p->pginfo.iov_cnt = 4;
  p->offsets[3] = PAGE_SIZE;
  p->pginfo.offset_cnt = 4;

  ret = vm_object_copy_pages (obj, &p->pginfo);
  test_assert_eq (ret, PAGE_SIZE * 3);
  test_assert_ne (p->offsets[3] & 1, 0);
  // Page 0 was rewritten with 0xfe above; the others kept 0xff.
  test_assert_eq (*(unsigned char *)p->iovs[0].iov_base, 0xfe);
  test_assert_eq (*(unsigned char *)p->iovs[1].iov_base, 0xff);
  test_assert_eq (*(unsigned char *)p->iovs[2].iov_base, 0xff);

  // Make sure the pages are clean.
  for (uint32_t i = 0; i < p->pginfo.offset_cnt - 1; ++i)
    {
      _Auto page = vm_object_lookup (obj, p->offsets[i]);
      test_assert_nonnull (page);
      test_assert_eq (page->dirty, VM_PAGE_CLEAN);
      vm_page_unref (page);
    }
}
/*
 * Driver for the external pager test: create a capability flow serviced
 * by 'test_vm_map_phys_entry', map the channel's VM object, and verify
 * that page faults are satisfied with the contents produced by the
 * landing pad, then run the dirty-page checks.
 */
static void
test_vm_map_phys_cap (void *arg __unused)
{
  struct cap_flow *flow;
  int error = cap_flow_create (&flow, CAP_FLOW_EXT_PAGER | CAP_FLOW_PAGER_FLUSHES,
                               1, (uintptr_t)test_vm_map_phys_entry);
  test_assert_zero (error);

  struct cap_channel *ch;
  error = cap_channel_create (&ch, flow, 2);
  test_assert_zero (error);

  // Userspace scratch: page 0 holds the IPC structures, page 2 is the
  // page-list buffer handed to the landing pad via the global.
  void *ptr;
  error = vm_map_anon_alloc (&ptr, vm_map_self (), PAGE_SIZE * 3);
  test_assert_zero (error);

  // Kernel page used as the landing pad's stack.
  _Auto stkpage = vm_page_alloc (0, VM_PAGE_SEL_DIRECTMAP,
                                 VM_PAGE_KERNEL, 0);
  test_assert_nonnull (stkpage);
  vm_page_init_refcount (stkpage);

  {
    struct
      {
        struct ipc_msg msg;
        struct iovec iov;
        struct ipc_msg_data mdata;
        struct cap_thread_info info;
        uint64_t buf[16];
      } *p = ptr;

    p->msg.size = sizeof (p->msg);
    p->iov.iov_base = p->buf;
    p->iov.iov_len = sizeof (p->buf);
    p->msg.iovs = &p->iov;
    p->msg.iov_cnt = 1;
    p->mdata.size = sizeof (p->mdata);

    // The stack pointer is the top of the page (stacks grow down).
    error = cap_flow_add_lpad (flow, (char *)vm_page_direct_ptr (stkpage) +
                               PAGE_SIZE, PAGE_SIZE, &p->msg,
                               &p->mdata, &p->info);
    test_assert_zero (error);
    test_vm_map_phys_room = (char *)ptr + PAGE_SIZE * 2;
  }

  uintptr_t va = 0;
  int flags = VM_MAP_FLAGS (VM_PROT_RDWR, VM_PROT_RDWR, VM_INHERIT_SHARE,
                            VM_ADV_DEFAULT, 0);

  _Auto obj = cap_channel_get_vmobj (ch);
  test_assert_nonnull (obj);
  test_assert_ne (obj->flags & VM_OBJECT_EXTERNAL, 0);

  // Map 3 pages of the pager-backed object at the tested offset.
  error = vm_map_enter (vm_map_self (), &va, PAGE_SIZE * 3, flags,
                        obj, TEST_VM_MAP_PHYS_OFFSET);
  test_assert_zero (error);

  /*
   * The landing pad returns its pages in reverse order, tagging page i
   * (in its order) with (i + 1) * 20 — hence 60, 40, 20 here. Writing
   * 0xff dirties each page for the dirty-handling test below.
   */
  for (int i = 0; i < 3; ++i)
    {
      test_assert_eq (*(unsigned char *)(va + PAGE_SIZE * i), 60 - (20 * i));
      *(unsigned char *)(va + PAGE_SIZE * i) = 0xff;
    }

  test_vm_map_phys_handle_dirty (obj, va);

  // The mapping entry must cover the faulted address and reference 'obj'.
  _Auto entry = vm_map_find (vm_map_self (), va + PAGE_SIZE);
  test_assert_nonnull (entry);
  test_assert_ge (va + PAGE_SIZE, entry->start);
  test_assert_eq (entry->object, obj);
  vm_map_entry_put (entry);

  cap_channel_put_vmobj (ch);
  vm_page_unref (stkpage);
  cap_base_rel (ch);
  cap_base_rel (flow);
}
  186. static void
  187. test_vm_map_phys (void *arg __unused)
  188. {
  189. _Auto map = vm_map_self ();
  190. uintptr_t addr = 0;
  191. int flags = VM_MAP_FLAGS (VM_PROT_RDWR, VM_PROT_RDWR, VM_INHERIT_SHARE,
  192. VM_ADV_DEFAULT, VM_MAP_PHYS | VM_MAP_ANON);
  193. int error = vm_map_enter (map, &addr, PAGE_SIZE * 3, flags, 0, 0);
  194. test_assert_zero (error);
  195. // Pin the thread to prevent the 'pmap_extract' call from failing below.
  196. thread_pin ();
  197. {
  198. unsigned char *ptr = (unsigned char *)(addr + PAGE_SIZE);
  199. unsigned char val = 0;
  200. for (size_t i = 0; i < PAGE_SIZE; ++i)
  201. val |= ptr[i];
  202. test_assert_zero (val);
  203. *ptr = 42;
  204. }
  205. phys_addr_t pa;
  206. error = pmap_extract (map->pmap, addr + PAGE_SIZE, &pa);
  207. thread_unpin ();
  208. test_assert_zero (error);
  209. uintptr_t va2 = 0;
  210. error = vm_map_enter (map, &va2, PAGE_SIZE, flags & ~VM_MAP_ANON, 0, pa);
  211. test_assert_zero (error);
  212. test_assert_eq (*(unsigned char *)va2, 42);
  213. }
  214. TEST_DEFERRED (vm_map_phys)
  215. {
  216. int error;
  217. struct thread *thread;
  218. error = test_util_create_thr (&thread, test_vm_map_phys,
  219. NULL, "vm_map_phys");
  220. test_assert_zero (error);
  221. thread_join (thread);
  222. error = test_util_create_thr (&thread, test_vm_map_phys_cap,
  223. NULL, "vm_map_phys_cap");
  224. test_assert_zero (error);
  225. thread_join (thread);
  226. return (TEST_OK);
  227. }