  1. /* $OpenBSD: kern_malloc_debug.c,v 1.34 2014/11/16 12:31:00 deraadt Exp $ */
  2. /*
  3. * Copyright (c) 1999, 2000 Artur Grabowski <art@openbsd.org>
  4. * All rights reserved.
  5. *
  6. * Redistribution and use in source and binary forms, with or without
  7. * modification, are permitted provided that the following conditions
  8. * are met:
  9. *
  10. * 1. Redistributions of source code must retain the above copyright
  11. * notice, this list of conditions and the following disclaimer.
  12. * 2. The name of the author may not be used to endorse or promote products
  13. * derived from this software without specific prior written permission.
  14. *
  15. * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES,
  16. * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
  17. * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
  18. * THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
  19. * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
  20. * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
  21. * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
  22. * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
  23. * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
  24. * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  25. */
  26. /*
  27. * This really belongs in kern/kern_malloc.c, but it was too much pollution.
  28. */
  29. /*
  30. * It's only possible to debug one type/size at a time. The question is
  31. * if this is a limitation or a feature. We never want to run this as the
  32. * default malloc because we'll run out of memory really fast. Adding
  33. * more types will also add to the complexity of the code.
  34. *
  35. * This is really simple. Every malloc() allocates two virtual pages,
  36. * the second page is left unmapped, and the value returned is aligned
  37. * so that it ends at (or very close to) the page boundary to catch overflows.
  38. * Every free() changes the protection of the first page to PROT_NONE so
  39. * that we can catch any dangling writes to it.
  40. * To minimize the risk of writes to recycled chunks we keep an LRU of latest
  41. * freed chunks. The length of it is controlled by MALLOC_DEBUG_CHUNKS.
  42. *
  43. * Don't expect any performance.
  44. *
  45. * TODO:
  46. * - support for size >= PAGE_SIZE
  47. * - add support to the fault handler to give better diagnostics if we fail.
  48. */
  49. #include <sys/param.h>
  50. #include <sys/proc.h>
  51. #include <sys/kernel.h>
  52. #include <sys/malloc.h>
  53. #include <sys/systm.h>
  54. #include <sys/pool.h>
  55. #include <uvm/uvm_extern.h>
/*
 * debug_malloc_type and debug_malloc_size define the type and size of
 * memory to be debugged. Use 0 for a wildcard. debug_malloc_size_lo
 * is the lower limit and debug_malloc_size_hi the upper limit of sizes
 * being debugged; 0 will not work as a wildcard for the upper limit.
 * For any debugging to take place, type must be != -1, size must be >= 0,
 * and if the limits are being used, size must be set to 0.
 * See /usr/src/sys/sys/malloc.h and malloc(9) for a list of types.
 *
 * Although those are variables, it's a really bad idea to change the type
 * if any memory chunks of this type are used. It's ok to change the size
 * in runtime.
 */
int debug_malloc_type = -1;
int debug_malloc_size = -1;
int debug_malloc_size_lo = -1;
int debug_malloc_size_hi = -1;

/*
 * MALLOC_DEBUG_CHUNKS is the number of memory chunks we require on the
 * freelist before we reuse them, i.e. the length of the free-chunk LRU.
 */
#define MALLOC_DEBUG_CHUNKS 16

void debug_malloc_allocate_free(int);

/*
 * One tracked chunk: a two-page virtual window (first page mapped,
 * second left unmapped as a guard page) plus bookkeeping.
 */
struct debug_malloc_entry {
	TAILQ_ENTRY(debug_malloc_entry) md_list;
	vaddr_t md_va;		/* base of the two-page virtual window */
	paddr_t md_pa;		/* physical page backing the first page */
	size_t md_size;		/* size requested by the caller */
	int md_type;		/* malloc(9) type of the allocation */
};

TAILQ_HEAD(,debug_malloc_entry) debug_malloc_freelist;	/* freed, quarantined */
TAILQ_HEAD(,debug_malloc_entry) debug_malloc_usedlist;	/* live allocations */

int debug_malloc_allocs;		/* stats: allocations served */
int debug_malloc_frees;			/* stats: frees handled */
int debug_malloc_pages;			/* stats: physical pages in use */
int debug_malloc_chunks_on_freelist;	/* current quarantine depth */
int debug_malloc_initialized;		/* set once debug_malloc_init() ran */

struct pool debug_malloc_pool;		/* backing pool for entry structs */
  94. int
  95. debug_malloc(unsigned long size, int type, int flags, void **addr)
  96. {
  97. struct debug_malloc_entry *md = NULL;
  98. int s, wait = (flags & M_NOWAIT) == 0;
  99. /* Careful not to compare unsigned long to int -1 */
  100. if (((type != debug_malloc_type && debug_malloc_type != 0) ||
  101. (size != debug_malloc_size && debug_malloc_size != 0) ||
  102. (debug_malloc_size_lo != -1 && size < debug_malloc_size_lo) ||
  103. (debug_malloc_size_hi != -1 && size > debug_malloc_size_hi) ||
  104. !debug_malloc_initialized) && type != M_DEBUG)
  105. return (0);
  106. /* XXX - fix later */
  107. if (size > PAGE_SIZE)
  108. return (0);
  109. s = splvm();
  110. if (debug_malloc_chunks_on_freelist < MALLOC_DEBUG_CHUNKS)
  111. debug_malloc_allocate_free(wait);
  112. md = TAILQ_FIRST(&debug_malloc_freelist);
  113. if (md == NULL) {
  114. splx(s);
  115. return (0);
  116. }
  117. TAILQ_REMOVE(&debug_malloc_freelist, md, md_list);
  118. debug_malloc_chunks_on_freelist--;
  119. TAILQ_INSERT_HEAD(&debug_malloc_usedlist, md, md_list);
  120. debug_malloc_allocs++;
  121. splx(s);
  122. pmap_kenter_pa(md->md_va, md->md_pa, PROT_READ | PROT_WRITE);
  123. pmap_update(pmap_kernel());
  124. md->md_size = size;
  125. md->md_type = type;
  126. /*
  127. * Align the returned addr so that it ends where the first page
  128. * ends. roundup to get decent alignment.
  129. */
  130. *addr = (void *)(md->md_va + PAGE_SIZE - roundup(size, sizeof(long)));
  131. return (1);
  132. }
  133. int
  134. debug_free(void *addr, int type)
  135. {
  136. struct debug_malloc_entry *md;
  137. vaddr_t va;
  138. int s;
  139. if (type != debug_malloc_type && debug_malloc_type != 0 &&
  140. type != M_DEBUG)
  141. return (0);
  142. /*
  143. * trunc_page to get the address of the page.
  144. */
  145. va = trunc_page((vaddr_t)addr);
  146. s = splvm();
  147. TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
  148. if (md->md_va == va)
  149. break;
  150. /*
  151. * If we are not responsible for this entry, let the normal free
  152. * handle it
  153. */
  154. if (md == NULL) {
  155. /*
  156. * sanity check. Check for multiple frees.
  157. */
  158. TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
  159. if (md->md_va == va)
  160. panic("debug_free: already free");
  161. splx(s);
  162. return (0);
  163. }
  164. debug_malloc_frees++;
  165. TAILQ_REMOVE(&debug_malloc_usedlist, md, md_list);
  166. TAILQ_INSERT_TAIL(&debug_malloc_freelist, md, md_list);
  167. debug_malloc_chunks_on_freelist++;
  168. /*
  169. * unmap the page.
  170. */
  171. pmap_kremove(md->md_va, PAGE_SIZE);
  172. pmap_update(pmap_kernel());
  173. splx(s);
  174. return (1);
  175. }
  176. void
  177. debug_malloc_init(void)
  178. {
  179. TAILQ_INIT(&debug_malloc_freelist);
  180. TAILQ_INIT(&debug_malloc_usedlist);
  181. debug_malloc_allocs = 0;
  182. debug_malloc_frees = 0;
  183. debug_malloc_pages = 0;
  184. debug_malloc_chunks_on_freelist = 0;
  185. pool_init(&debug_malloc_pool, sizeof(struct debug_malloc_entry),
  186. 0, 0, 0, "mdbepl", NULL);
  187. debug_malloc_initialized = 1;
  188. }
  189. /*
  190. * Add one chunk to the freelist.
  191. *
  192. * called at splvm.
  193. */
  194. void
  195. debug_malloc_allocate_free(int wait)
  196. {
  197. vaddr_t va, offset;
  198. struct vm_page *pg;
  199. struct debug_malloc_entry *md;
  200. splassert(IPL_VM);
  201. md = pool_get(&debug_malloc_pool, wait ? PR_WAITOK : PR_NOWAIT);
  202. if (md == NULL)
  203. return;
  204. va = uvm_km_kmemalloc(kmem_map, NULL, PAGE_SIZE * 2,
  205. UVM_KMF_VALLOC | (wait ? 0: UVM_KMF_NOWAIT));
  206. if (va == 0) {
  207. pool_put(&debug_malloc_pool, md);
  208. return;
  209. }
  210. offset = va - vm_map_min(kernel_map);
  211. for (;;) {
  212. pg = uvm_pagealloc(NULL, 0, NULL, 0);
  213. if (pg) {
  214. atomic_clearbits_int(&pg->pg_flags, PG_BUSY);
  215. UVM_PAGE_OWN(pg, NULL);
  216. }
  217. if (pg)
  218. break;
  219. if (wait == 0) {
  220. uvm_unmap(kmem_map, va, va + PAGE_SIZE * 2);
  221. pool_put(&debug_malloc_pool, md);
  222. return;
  223. }
  224. uvm_wait("debug_malloc");
  225. }
  226. md->md_va = va;
  227. md->md_pa = VM_PAGE_TO_PHYS(pg);
  228. debug_malloc_pages++;
  229. TAILQ_INSERT_HEAD(&debug_malloc_freelist, md, md_list);
  230. debug_malloc_chunks_on_freelist++;
  231. }
  232. void
  233. debug_malloc_print(void)
  234. {
  235. debug_malloc_printit(printf, 0);
  236. }
  237. void
  238. debug_malloc_assert_allocated(void *addr, const char *func)
  239. {
  240. struct debug_malloc_entry *md;
  241. vaddr_t va = (vaddr_t)addr;
  242. TAILQ_FOREACH(md, &debug_malloc_freelist, md_list) {
  243. if (va >= md->md_va &&
  244. va < md->md_va + 2 * PAGE_SIZE)
  245. panic("debug_malloc: (%s): %p - freed", func, addr);
  246. }
  247. TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list) {
  248. if (va >= md->md_va + PAGE_SIZE &&
  249. va < md->md_va + 2 * PAGE_SIZE)
  250. panic("debug_malloc: (%s): %p - overflow", func, addr);
  251. }
  252. }
  253. void
  254. debug_malloc_printit(
  255. int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))),
  256. vaddr_t addr)
  257. {
  258. struct debug_malloc_entry *md;
  259. if (addr) {
  260. TAILQ_FOREACH(md, &debug_malloc_freelist, md_list) {
  261. if (addr >= md->md_va &&
  262. addr < md->md_va + 2 * PAGE_SIZE) {
  263. (*pr)("Memory at address 0x%lx is in a freed "
  264. "area. type %d, size: %d\n ",
  265. addr, md->md_type, md->md_size);
  266. return;
  267. }
  268. }
  269. TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list) {
  270. if (addr >= md->md_va + PAGE_SIZE &&
  271. addr < md->md_va + 2 * PAGE_SIZE) {
  272. (*pr)("Memory at address 0x%lx is just outside "
  273. "an allocated area. type %d, size: %d\n",
  274. addr, md->md_type, md->md_size);
  275. return;
  276. }
  277. }
  278. (*pr)("Memory at address 0x%lx is outside debugged malloc.\n",
  279. addr);
  280. return;
  281. }
  282. (*pr)("allocs: %d\n", debug_malloc_allocs);
  283. (*pr)("frees: %d\n", debug_malloc_frees);
  284. (*pr)("pages used: %d\n", debug_malloc_pages);
  285. (*pr)("chunks on freelist: %d\n", debug_malloc_chunks_on_freelist);
  286. (*pr)("\taddr:\tsize:\n");
  287. (*pr)("free chunks:\n");
  288. TAILQ_FOREACH(md, &debug_malloc_freelist, md_list)
  289. (*pr)("\t0x%lx\t0x%lx\t%d\n", md->md_va, md->md_size,
  290. md->md_type);
  291. (*pr)("used chunks:\n");
  292. TAILQ_FOREACH(md, &debug_malloc_usedlist, md_list)
  293. (*pr)("\t0x%lx\t0x%lx\t%d\n", md->md_va, md->md_size,
  294. md->md_type);
  295. }