/*
 * kern_malloc.c — OpenBSD kernel memory allocator.
 * (Web-viewer page header and inlined line-number residue removed.)
 */
/*	$OpenBSD: kern_malloc.c,v 1.128 2015/03/14 03:38:50 jsg Exp $	*/
/*	$NetBSD: kern_malloc.c,v 1.15.4.2 1996/06/13 17:10:56 cgd Exp $	*/

/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */
  33. #include <sys/param.h>
  34. #include <sys/kernel.h>
  35. #include <sys/malloc.h>
  36. #include <sys/stdint.h>
  37. #include <sys/systm.h>
  38. #include <sys/sysctl.h>
  39. #include <sys/time.h>
  40. #include <sys/rwlock.h>
  41. #include <uvm/uvm_extern.h>
  42. static
  43. #ifndef SMALL_KERNEL
  44. __inline__
  45. #endif
  46. long BUCKETINDX(size_t sz)
  47. {
  48. long b, d;
  49. /* note that this relies upon MINALLOCSIZE being 1 << MINBUCKET */
  50. b = 7 + MINBUCKET; d = 4;
  51. while (d != 0) {
  52. if (sz <= (1 << b))
  53. b -= d;
  54. else
  55. b += d;
  56. d >>= 1;
  57. }
  58. if (sz <= (1 << b))
  59. b += 0;
  60. else
  61. b += 1;
  62. return b;
  63. }
  64. static struct vm_map kmem_map_store;
  65. struct vm_map *kmem_map = NULL;
  66. /*
  67. * Default number of pages in kmem_map. We attempt to calculate this
  68. * at run-time, but allow it to be either patched or set in the kernel
  69. * config file.
  70. */
  71. #ifndef NKMEMPAGES
  72. #define NKMEMPAGES 0
  73. #endif
  74. u_int nkmempages = NKMEMPAGES;
  75. /*
  76. * Defaults for lower- and upper-bounds for the kmem_map page count.
  77. * Can be overridden by kernel config options.
  78. */
  79. #ifndef NKMEMPAGES_MIN
  80. #define NKMEMPAGES_MIN 0
  81. #endif
  82. u_int nkmempages_min = 0;
  83. #ifndef NKMEMPAGES_MAX
  84. #define NKMEMPAGES_MAX NKMEMPAGES_MAX_DEFAULT
  85. #endif
  86. u_int nkmempages_max = 0;
  87. struct kmembuckets bucket[MINBUCKET + 16];
  88. #ifdef KMEMSTATS
  89. struct kmemstats kmemstats[M_LAST];
  90. #endif
  91. struct kmemusage *kmemusage;
  92. char *kmembase, *kmemlimit;
  93. char buckstring[16 * sizeof("123456,")];
  94. int buckstring_init = 0;
  95. #if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
  96. char *memname[] = INITKMEMNAMES;
  97. char *memall = NULL;
  98. struct rwlock sysctl_kmemlock = RWLOCK_INITIALIZER("sysctlklk");
  99. #endif
  100. /*
  101. * Normally the freelist structure is used only to hold the list pointer
  102. * for free objects. However, when running with diagnostics, the first
  103. * 8 bytes of the structure is unused except for diagnostic information,
  104. * and the free list pointer is at offset 8 in the structure. Since the
  105. * first 8 bytes is the portion of the structure most often modified, this
  106. * helps to detect memory reuse problems and avoid free list corruption.
  107. */
  108. struct kmem_freelist {
  109. int32_t kf_spare0;
  110. int16_t kf_type;
  111. int16_t kf_spare1;
  112. XSIMPLEQ_ENTRY(kmem_freelist) kf_flist;
  113. };
  114. #ifdef DIAGNOSTIC
  115. /*
  116. * This structure provides a set of masks to catch unaligned frees.
  117. */
  118. const long addrmask[] = { 0,
  119. 0x00000001, 0x00000003, 0x00000007, 0x0000000f,
  120. 0x0000001f, 0x0000003f, 0x0000007f, 0x000000ff,
  121. 0x000001ff, 0x000003ff, 0x000007ff, 0x00000fff,
  122. 0x00001fff, 0x00003fff, 0x00007fff, 0x0000ffff,
  123. };
  124. #endif /* DIAGNOSTIC */
  125. #ifndef SMALL_KERNEL
  126. struct timeval malloc_errintvl = { 5, 0 };
  127. struct timeval malloc_lasterr;
  128. #endif
  129. /*
  130. * Allocate a block of memory
  131. */
  132. void *
  133. malloc(size_t size, int type, int flags)
  134. {
  135. struct kmembuckets *kbp;
  136. struct kmemusage *kup;
  137. struct kmem_freelist *freep;
  138. long indx, npg, allocsize;
  139. int s;
  140. caddr_t va, cp;
  141. #ifdef DIAGNOSTIC
  142. int freshalloc;
  143. char *savedtype;
  144. #endif
  145. #ifdef KMEMSTATS
  146. struct kmemstats *ksp = &kmemstats[type];
  147. if (((unsigned long)type) <= 1 || ((unsigned long)type) >= M_LAST)
  148. panic("malloc: bogus type %d", type);
  149. #endif
  150. KASSERT(flags & (M_WAITOK | M_NOWAIT));
  151. if ((flags & M_NOWAIT) == 0) {
  152. extern int pool_debug;
  153. #ifdef DIAGNOSTIC
  154. assertwaitok();
  155. if (pool_debug == 2)
  156. yield();
  157. #endif
  158. if (!cold && pool_debug) {
  159. KERNEL_UNLOCK();
  160. KERNEL_LOCK();
  161. }
  162. }
  163. #ifdef MALLOC_DEBUG
  164. if (debug_malloc(size, type, flags, (void **)&va)) {
  165. if ((flags & M_ZERO) && va != NULL)
  166. memset(va, 0, size);
  167. return (va);
  168. }
  169. #endif
  170. if (size > 65535 * PAGE_SIZE) {
  171. if (flags & M_CANFAIL) {
  172. #ifndef SMALL_KERNEL
  173. if (ratecheck(&malloc_lasterr, &malloc_errintvl))
  174. printf("malloc(): allocation too large, "
  175. "type = %d, size = %lu\n", type, size);
  176. #endif
  177. return (NULL);
  178. } else
  179. panic("malloc: allocation too large, "
  180. "type = %d, size = %lu\n", type, size);
  181. }
  182. indx = BUCKETINDX(size);
  183. kbp = &bucket[indx];
  184. s = splvm();
  185. #ifdef KMEMSTATS
  186. while (ksp->ks_memuse >= ksp->ks_limit) {
  187. if (flags & M_NOWAIT) {
  188. splx(s);
  189. return (NULL);
  190. }
  191. if (ksp->ks_limblocks < 65535)
  192. ksp->ks_limblocks++;
  193. tsleep(ksp, PSWP+2, memname[type], 0);
  194. }
  195. ksp->ks_size |= 1 << indx;
  196. #endif
  197. if (size > MAXALLOCSAVE)
  198. allocsize = round_page(size);
  199. else
  200. allocsize = 1 << indx;
  201. if (XSIMPLEQ_FIRST(&kbp->kb_freelist) == NULL) {
  202. npg = atop(round_page(allocsize));
  203. va = (caddr_t)uvm_km_kmemalloc_pla(kmem_map, NULL,
  204. (vsize_t)ptoa(npg), 0,
  205. ((flags & M_NOWAIT) ? UVM_KMF_NOWAIT : 0) |
  206. ((flags & M_CANFAIL) ? UVM_KMF_CANFAIL : 0),
  207. no_constraint.ucr_low, no_constraint.ucr_high,
  208. 0, 0, 0);
  209. if (va == NULL) {
  210. /*
  211. * Kmem_malloc() can return NULL, even if it can
  212. * wait, if there is no map space available, because
  213. * it can't fix that problem. Neither can we,
  214. * right now. (We should release pages which
  215. * are completely free and which are in buckets
  216. * with too many free elements.)
  217. */
  218. if ((flags & (M_NOWAIT|M_CANFAIL)) == 0)
  219. panic("malloc: out of space in kmem_map");
  220. splx(s);
  221. return (NULL);
  222. }
  223. #ifdef KMEMSTATS
  224. kbp->kb_total += kbp->kb_elmpercl;
  225. #endif
  226. kup = btokup(va);
  227. kup->ku_indx = indx;
  228. #ifdef DIAGNOSTIC
  229. freshalloc = 1;
  230. #endif
  231. if (allocsize > MAXALLOCSAVE) {
  232. kup->ku_pagecnt = npg;
  233. #ifdef KMEMSTATS
  234. ksp->ks_memuse += allocsize;
  235. #endif
  236. goto out;
  237. }
  238. #ifdef KMEMSTATS
  239. kup->ku_freecnt = kbp->kb_elmpercl;
  240. kbp->kb_totalfree += kbp->kb_elmpercl;
  241. #endif
  242. cp = va + (npg * PAGE_SIZE) - allocsize;
  243. for (;;) {
  244. freep = (struct kmem_freelist *)cp;
  245. #ifdef DIAGNOSTIC
  246. /*
  247. * Copy in known text to detect modification
  248. * after freeing.
  249. */
  250. poison_mem(cp, allocsize);
  251. freep->kf_type = M_FREE;
  252. #endif /* DIAGNOSTIC */
  253. XSIMPLEQ_INSERT_HEAD(&kbp->kb_freelist, freep, kf_flist);
  254. if (cp <= va)
  255. break;
  256. cp -= allocsize;
  257. }
  258. } else {
  259. #ifdef DIAGNOSTIC
  260. freshalloc = 0;
  261. #endif
  262. }
  263. freep = XSIMPLEQ_FIRST(&kbp->kb_freelist);
  264. XSIMPLEQ_REMOVE_HEAD(&kbp->kb_freelist, kf_flist);
  265. va = (caddr_t)freep;
  266. #ifdef DIAGNOSTIC
  267. savedtype = (unsigned)freep->kf_type < M_LAST ?
  268. memname[freep->kf_type] : "???";
  269. if (freshalloc == 0 && XSIMPLEQ_FIRST(&kbp->kb_freelist)) {
  270. int rv;
  271. vaddr_t addr = (vaddr_t)XSIMPLEQ_FIRST(&kbp->kb_freelist);
  272. vm_map_lock(kmem_map);
  273. rv = uvm_map_checkprot(kmem_map, addr,
  274. addr + sizeof(struct kmem_freelist), PROT_WRITE);
  275. vm_map_unlock(kmem_map);
  276. if (!rv) {
  277. printf("%s %zd of object %p size 0x%lx %s %s"
  278. " (invalid addr %p)\n",
  279. "Data modified on freelist: word",
  280. (int32_t *)&addr - (int32_t *)kbp, va, size,
  281. "previous type", savedtype, (void *)addr);
  282. }
  283. }
  284. /* Fill the fields that we've used with poison */
  285. poison_mem(freep, sizeof(*freep));
  286. /* and check that the data hasn't been modified. */
  287. if (freshalloc == 0) {
  288. size_t pidx;
  289. uint32_t pval;
  290. if (poison_check(va, allocsize, &pidx, &pval)) {
  291. panic("%s %zd of object %p size 0x%lx %s %s"
  292. " (0x%x != 0x%x)\n",
  293. "Data modified on freelist: word",
  294. pidx, va, size, "previous type",
  295. savedtype, ((int32_t*)va)[pidx], pval);
  296. }
  297. }
  298. freep->kf_spare0 = 0;
  299. #endif /* DIAGNOSTIC */
  300. #ifdef KMEMSTATS
  301. kup = btokup(va);
  302. if (kup->ku_indx != indx)
  303. panic("malloc: wrong bucket");
  304. if (kup->ku_freecnt == 0)
  305. panic("malloc: lost data");
  306. kup->ku_freecnt--;
  307. kbp->kb_totalfree--;
  308. ksp->ks_memuse += 1 << indx;
  309. out:
  310. kbp->kb_calls++;
  311. ksp->ks_inuse++;
  312. ksp->ks_calls++;
  313. if (ksp->ks_memuse > ksp->ks_maxused)
  314. ksp->ks_maxused = ksp->ks_memuse;
  315. #else
  316. out:
  317. #endif
  318. splx(s);
  319. if ((flags & M_ZERO) && va != NULL)
  320. memset(va, 0, size);
  321. return (va);
  322. }
  323. /*
  324. * Free a block of memory allocated by malloc.
  325. */
  326. void
  327. free(void *addr, int type, size_t freedsize)
  328. {
  329. struct kmembuckets *kbp;
  330. struct kmemusage *kup;
  331. struct kmem_freelist *freep;
  332. long size;
  333. int s;
  334. #ifdef DIAGNOSTIC
  335. long alloc;
  336. #endif
  337. #ifdef KMEMSTATS
  338. struct kmemstats *ksp = &kmemstats[type];
  339. #endif
  340. if (addr == NULL)
  341. return;
  342. #ifdef MALLOC_DEBUG
  343. if (debug_free(addr, type))
  344. return;
  345. #endif
  346. #ifdef DIAGNOSTIC
  347. if (addr < (void *)kmembase || addr >= (void *)kmemlimit)
  348. panic("free: non-malloced addr %p type %s", addr,
  349. memname[type]);
  350. #endif
  351. kup = btokup(addr);
  352. size = 1 << kup->ku_indx;
  353. kbp = &bucket[kup->ku_indx];
  354. if (size > MAXALLOCSAVE)
  355. size = kup->ku_pagecnt << PAGE_SHIFT;
  356. s = splvm();
  357. #ifdef DIAGNOSTIC
  358. if (freedsize != 0 && freedsize > size)
  359. panic("free: size too large %zu > %ld (%p) type %s",
  360. freedsize, size, addr, memname[type]);
  361. if (freedsize != 0 && size > MINALLOCSIZE && freedsize < size / 2)
  362. panic("free: size too small %zu < %ld / 2 (%p) type %s",
  363. freedsize, size, addr, memname[type]);
  364. /*
  365. * Check for returns of data that do not point to the
  366. * beginning of the allocation.
  367. */
  368. if (size > PAGE_SIZE)
  369. alloc = addrmask[BUCKETINDX(PAGE_SIZE)];
  370. else
  371. alloc = addrmask[kup->ku_indx];
  372. if (((u_long)addr & alloc) != 0)
  373. panic("free: unaligned addr %p, size %ld, type %s, mask %ld",
  374. addr, size, memname[type], alloc);
  375. #endif /* DIAGNOSTIC */
  376. if (size > MAXALLOCSAVE) {
  377. uvm_km_free(kmem_map, (vaddr_t)addr, ptoa(kup->ku_pagecnt));
  378. #ifdef KMEMSTATS
  379. ksp->ks_memuse -= size;
  380. kup->ku_indx = 0;
  381. kup->ku_pagecnt = 0;
  382. if (ksp->ks_memuse + size >= ksp->ks_limit &&
  383. ksp->ks_memuse < ksp->ks_limit)
  384. wakeup(ksp);
  385. ksp->ks_inuse--;
  386. kbp->kb_total -= 1;
  387. #endif
  388. splx(s);
  389. return;
  390. }
  391. freep = (struct kmem_freelist *)addr;
  392. #ifdef DIAGNOSTIC
  393. /*
  394. * Check for multiple frees. Use a quick check to see if
  395. * it looks free before laboriously searching the freelist.
  396. */
  397. if (freep->kf_spare0 == poison_value(freep)) {
  398. struct kmem_freelist *fp;
  399. XSIMPLEQ_FOREACH(fp, &kbp->kb_freelist, kf_flist) {
  400. if (addr != fp)
  401. continue;
  402. printf("multiply freed item %p\n", addr);
  403. panic("free: duplicated free");
  404. }
  405. }
  406. /*
  407. * Copy in known text to detect modification after freeing
  408. * and to make it look free. Also, save the type being freed
  409. * so we can list likely culprit if modification is detected
  410. * when the object is reallocated.
  411. */
  412. poison_mem(addr, size);
  413. freep->kf_spare0 = poison_value(freep);
  414. freep->kf_type = type;
  415. #endif /* DIAGNOSTIC */
  416. #ifdef KMEMSTATS
  417. kup->ku_freecnt++;
  418. if (kup->ku_freecnt >= kbp->kb_elmpercl) {
  419. if (kup->ku_freecnt > kbp->kb_elmpercl)
  420. panic("free: multiple frees");
  421. else if (kbp->kb_totalfree > kbp->kb_highwat)
  422. kbp->kb_couldfree++;
  423. }
  424. kbp->kb_totalfree++;
  425. ksp->ks_memuse -= size;
  426. if (ksp->ks_memuse + size >= ksp->ks_limit &&
  427. ksp->ks_memuse < ksp->ks_limit)
  428. wakeup(ksp);
  429. ksp->ks_inuse--;
  430. #endif
  431. XSIMPLEQ_INSERT_TAIL(&kbp->kb_freelist, freep, kf_flist);
  432. splx(s);
  433. }
  434. /*
  435. * Compute the number of pages that kmem_map will map, that is,
  436. * the size of the kernel malloc arena.
  437. */
  438. void
  439. kmeminit_nkmempages(void)
  440. {
  441. u_int npages;
  442. if (nkmempages != 0) {
  443. /*
  444. * It's already been set (by us being here before, or
  445. * by patching or kernel config options), bail out now.
  446. */
  447. return;
  448. }
  449. /*
  450. * We can't initialize these variables at compilation time, since
  451. * the page size may not be known (on sparc GENERIC kernels, for
  452. * example). But we still want the MD code to be able to provide
  453. * better values.
  454. */
  455. if (nkmempages_min == 0)
  456. nkmempages_min = NKMEMPAGES_MIN;
  457. if (nkmempages_max == 0)
  458. nkmempages_max = NKMEMPAGES_MAX;
  459. /*
  460. * We use the following (simple) formula:
  461. *
  462. * - Starting point is physical memory / 4.
  463. *
  464. * - Clamp it down to nkmempages_max.
  465. *
  466. * - Round it up to nkmempages_min.
  467. */
  468. npages = physmem / 4;
  469. if (npages > nkmempages_max)
  470. npages = nkmempages_max;
  471. if (npages < nkmempages_min)
  472. npages = nkmempages_min;
  473. nkmempages = npages;
  474. }
  475. /*
  476. * Initialize the kernel memory allocator
  477. */
  478. void
  479. kmeminit(void)
  480. {
  481. vaddr_t base, limit;
  482. long indx;
  483. #ifdef DIAGNOSTIC
  484. if (sizeof(struct kmem_freelist) > (1 << MINBUCKET))
  485. panic("kmeminit: minbucket too small/struct freelist too big");
  486. #endif
  487. /*
  488. * Compute the number of kmem_map pages, if we have not
  489. * done so already.
  490. */
  491. kmeminit_nkmempages();
  492. base = vm_map_min(kernel_map);
  493. kmem_map = uvm_km_suballoc(kernel_map, &base, &limit,
  494. (vsize_t)nkmempages << PAGE_SHIFT,
  495. #ifdef KVA_GUARDPAGES
  496. VM_MAP_INTRSAFE | VM_MAP_GUARDPAGES,
  497. #else
  498. VM_MAP_INTRSAFE,
  499. #endif
  500. FALSE, &kmem_map_store);
  501. kmembase = (char *)base;
  502. kmemlimit = (char *)limit;
  503. kmemusage = (struct kmemusage *) uvm_km_zalloc(kernel_map,
  504. (vsize_t)(nkmempages * sizeof(struct kmemusage)));
  505. for (indx = 0; indx < MINBUCKET + 16; indx++) {
  506. XSIMPLEQ_INIT(&bucket[indx].kb_freelist);
  507. }
  508. #ifdef KMEMSTATS
  509. for (indx = 0; indx < MINBUCKET + 16; indx++) {
  510. if (1 << indx >= PAGE_SIZE)
  511. bucket[indx].kb_elmpercl = 1;
  512. else
  513. bucket[indx].kb_elmpercl = PAGE_SIZE / (1 << indx);
  514. bucket[indx].kb_highwat = 5 * bucket[indx].kb_elmpercl;
  515. }
  516. for (indx = 0; indx < M_LAST; indx++)
  517. kmemstats[indx].ks_limit = nkmempages * PAGE_SIZE * 6 / 10;
  518. #endif
  519. #ifdef MALLOC_DEBUG
  520. debug_malloc_init();
  521. #endif
  522. }
  523. /*
  524. * Return kernel malloc statistics information.
  525. */
  526. int
  527. sysctl_malloc(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
  528. size_t newlen, struct proc *p)
  529. {
  530. struct kmembuckets kb;
  531. #if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
  532. int error;
  533. #endif
  534. int i, siz;
  535. if (namelen != 2 && name[0] != KERN_MALLOC_BUCKETS &&
  536. name[0] != KERN_MALLOC_KMEMNAMES)
  537. return (ENOTDIR); /* overloaded */
  538. switch (name[0]) {
  539. case KERN_MALLOC_BUCKETS:
  540. /* Initialize the first time */
  541. if (buckstring_init == 0) {
  542. buckstring_init = 1;
  543. memset(buckstring, 0, sizeof(buckstring));
  544. for (siz = 0, i = MINBUCKET; i < MINBUCKET + 16; i++) {
  545. snprintf(buckstring + siz,
  546. sizeof buckstring - siz,
  547. "%d,", (u_int)(1<<i));
  548. siz += strlen(buckstring + siz);
  549. }
  550. /* Remove trailing comma */
  551. if (siz)
  552. buckstring[siz - 1] = '\0';
  553. }
  554. return (sysctl_rdstring(oldp, oldlenp, newp, buckstring));
  555. case KERN_MALLOC_BUCKET:
  556. memcpy(&kb, &bucket[BUCKETINDX(name[1])], sizeof(kb));
  557. memset(&kb.kb_freelist, 0, sizeof(kb.kb_freelist));
  558. return (sysctl_rdstruct(oldp, oldlenp, newp, &kb, sizeof(kb)));
  559. case KERN_MALLOC_KMEMSTATS:
  560. #ifdef KMEMSTATS
  561. if ((name[1] < 0) || (name[1] >= M_LAST))
  562. return (EINVAL);
  563. return (sysctl_rdstruct(oldp, oldlenp, newp,
  564. &kmemstats[name[1]], sizeof(struct kmemstats)));
  565. #else
  566. return (EOPNOTSUPP);
  567. #endif
  568. case KERN_MALLOC_KMEMNAMES:
  569. #if defined(KMEMSTATS) || defined(DIAGNOSTIC) || defined(FFS_SOFTUPDATES)
  570. error = rw_enter(&sysctl_kmemlock, RW_WRITE|RW_INTR);
  571. if (error)
  572. return (error);
  573. if (memall == NULL) {
  574. int totlen;
  575. /* Figure out how large a buffer we need */
  576. for (totlen = 0, i = 0; i < M_LAST; i++) {
  577. if (memname[i])
  578. totlen += strlen(memname[i]);
  579. totlen++;
  580. }
  581. memall = malloc(totlen + M_LAST, M_SYSCTL,
  582. M_WAITOK|M_ZERO);
  583. for (siz = 0, i = 0; i < M_LAST; i++) {
  584. snprintf(memall + siz,
  585. totlen + M_LAST - siz,
  586. "%s,", memname[i] ? memname[i] : "");
  587. siz += strlen(memall + siz);
  588. }
  589. /* Remove trailing comma */
  590. if (siz)
  591. memall[siz - 1] = '\0';
  592. /* Now, convert all spaces to underscores */
  593. for (i = 0; i < totlen; i++)
  594. if (memall[i] == ' ')
  595. memall[i] = '_';
  596. }
  597. rw_exit_write(&sysctl_kmemlock);
  598. return (sysctl_rdstring(oldp, oldlenp, newp, memall));
  599. #else
  600. return (EOPNOTSUPP);
  601. #endif
  602. default:
  603. return (EOPNOTSUPP);
  604. }
  605. /* NOTREACHED */
  606. }
  607. /*
  608. * Round up a size to how much malloc would actually allocate.
  609. */
  610. size_t
  611. malloc_roundup(size_t sz)
  612. {
  613. if (sz > MAXALLOCSAVE)
  614. return round_page(sz);
  615. return (1 << BUCKETINDX(sz));
  616. }
  617. #if defined(DDB)
  618. #include <machine/db_machdep.h>
  619. #include <ddb/db_output.h>
  620. void
  621. malloc_printit(
  622. int (*pr)(const char *, ...) __attribute__((__format__(__kprintf__,1,2))))
  623. {
  624. #ifdef KMEMSTATS
  625. struct kmemstats *km;
  626. int i;
  627. (*pr)("%15s %5s %6s %7s %6s %9s %8s %8s\n",
  628. "Type", "InUse", "MemUse", "HighUse", "Limit", "Requests",
  629. "Type Lim", "Kern Lim");
  630. for (i = 0, km = kmemstats; i < M_LAST; i++, km++) {
  631. if (!km->ks_calls || !memname[i])
  632. continue;
  633. (*pr)("%15s %5ld %6ldK %7ldK %6ldK %9ld %8d %8d\n",
  634. memname[i], km->ks_inuse, km->ks_memuse / 1024,
  635. km->ks_maxused / 1024, km->ks_limit / 1024,
  636. km->ks_calls, km->ks_limblocks, km->ks_mapblocks);
  637. }
  638. #else
  639. (*pr)("No KMEMSTATS compiled in\n");
  640. #endif
  641. }
  642. #endif /* DDB */
/*
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
  658. /*
  659. * This is sqrt(SIZE_MAX+1), as s1*s2 <= SIZE_MAX
  660. * if both s1 < MUL_NO_OVERFLOW and s2 < MUL_NO_OVERFLOW
  661. */
  662. #define MUL_NO_OVERFLOW (1UL << (sizeof(size_t) * 4))
  663. void *
  664. mallocarray(size_t nmemb, size_t size, int type, int flags)
  665. {
  666. if ((nmemb >= MUL_NO_OVERFLOW || size >= MUL_NO_OVERFLOW) &&
  667. nmemb > 0 && SIZE_MAX / nmemb < size) {
  668. if (flags & M_CANFAIL)
  669. return (NULL);
  670. panic("mallocarray: overflow %zu * %zu", nmemb, size);
  671. }
  672. return (malloc(size * nmemb, type, flags));
  673. }