/*
 * Copyright (c) 2010-2018 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * This allocator is based on the paper "The Slab Allocator: An Object-Caching
 * Kernel Memory Allocator" by Jeff Bonwick.
 *
 * It allows the allocation of objects (i.e. fixed-size typed buffers) from
 * caches and is efficient in both space and time. This implementation follows
 * many of the indications from the paper mentioned. The most notable
 * differences are outlined below.
 *
 * The per-cache self-scaling hash table for buffer-to-bufctl conversion,
 * described in 3.2.3 "Slab Layout for Large Objects", has been replaced with
 * a constant time buffer-to-slab lookup that relies on the VM system.
 *
 * Slabs are allocated from the physical page allocator if they're page-sized,
 * and from kernel virtual memory if they're bigger, in order to prevent
 * physical memory fragmentation from making slab allocations fail.
 *
 * This implementation uses per-CPU pools of objects, which service most
 * allocation requests. These pools act as caches (but are named differently
 * to avoid confusion with CPU caches) that reduce contention on multiprocessor
 * systems. When a pool is empty and cannot provide an object, it is filled by
 * transferring multiple objects from the slab layer. The symmetric case is
 * handled likewise.
 *
 * TODO Rework the CPU pool layer to use the SLQB algorithm by Nick Piggin.
 */
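
/*
 * Illustrative usage sketch (not part of the allocator itself): a typical
 * client declares a cache for its object type at initialization time, then
 * allocates and frees objects from it. The struct and names below are
 * hypothetical and only meant to show the intended calling sequence of
 * kmem_cache_init(), kmem_cache_alloc() and kmem_cache_free():
 *
 *     struct foo {
 *         struct list node;
 *         int value;
 *     };
 *
 *     static struct kmem_cache foo_cache;
 *
 *     void
 *     foo_setup(void)
 *     {
 *         kmem_cache_init(&foo_cache, "foo", sizeof(struct foo),
 *                         0, NULL, 0);
 *     }
 *
 *     struct foo *
 *     foo_create(int value)
 *     {
 *         struct foo *foo;
 *
 *         foo = kmem_cache_alloc(&foo_cache);
 *
 *         if (foo == NULL) {
 *             return NULL;
 *         }
 *
 *         foo->value = value;
 *         return foo;
 *     }
 *
 *     void
 *     foo_destroy(struct foo *foo)
 *     {
 *         kmem_cache_free(&foo_cache, foo);
 *     }
 */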
#include <assert.h>
#include <limits.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <kern/init.h>
#include <kern/list.h>
#include <kern/log.h>
#include <kern/log2.h>
#include <kern/kmem.h>
#include <kern/kmem_i.h>
#include <kern/macros.h>
#include <kern/mutex.h>
#include <kern/panic.h>
#include <kern/shell.h>
#include <kern/thread.h>
#include <machine/cpu.h>
#include <machine/page.h>
#include <machine/pmap.h>
#include <vm/vm_kmem.h>
#include <vm/vm_page.h>

/*
 * Minimum required alignment.
 */
#define KMEM_ALIGN_MIN 8

/*
 * Minimum number of buffers per slab.
 *
 * This value is ignored when the slab size exceeds a threshold.
 */
#define KMEM_MIN_BUFS_PER_SLAB 8

/*
 * Special slab size beyond which the minimum number of buffers per slab is
 * ignored when computing the slab size of a cache.
 */
#define KMEM_SLAB_SIZE_THRESHOLD (8 * PAGE_SIZE)

/*
 * Special buffer size under which slab data is unconditionally allocated
 * from its associated slab.
 */
#define KMEM_BUF_SIZE_THRESHOLD (PAGE_SIZE / 8)

/*
 * The transfer size of a CPU pool is computed by dividing the pool size by
 * this value.
 */
#define KMEM_CPU_POOL_TRANSFER_RATIO 2
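
/*
 * For example (illustrative, see kmem_cpu_pool_build()): with a pool of
 * 64 objects and a ratio of 2, at most (64 + 2 - 1) / 2 = 32 objects are
 * moved between a CPU pool and the slab layer in a single transfer.
 */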

/*
 * Logarithm of the size of the smallest general cache.
 */
#define KMEM_CACHES_FIRST_ORDER 5

/*
 * Number of caches backing general purpose allocations.
 */
#define KMEM_NR_MEM_CACHES 13
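
/*
 * With a first order of 5 and 13 caches, the general purpose caches cover
 * power-of-two sizes from 32 bytes (1 << 5) up to 128 KiB (1 << 17);
 * larger requests fall back to direct page allocation in kmem_alloc().
 */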

/*
 * Options for kmem_cache_alloc_verify().
 */
#define KMEM_AV_NOCONSTRUCT 0
#define KMEM_AV_CONSTRUCT   1

/*
 * Error codes for kmem_cache_error().
 */
#define KMEM_ERR_INVALID    0   /* Invalid address being freed */
#define KMEM_ERR_DOUBLEFREE 1   /* Freeing already free address */
#define KMEM_ERR_BUFTAG     2   /* Invalid buftag content */
#define KMEM_ERR_MODIFIED   3   /* Buffer modified while free */
#define KMEM_ERR_REDZONE    4   /* Redzone violation */

#ifdef KMEM_USE_CPU_LAYER

/*
 * Available CPU pool types.
 *
 * For each entry, the CPU pool size applies from the entry buf_size
 * (excluded) up to (and including) the buf_size of the preceding entry.
 *
 * See struct kmem_cpu_pool_type for a description of the values.
 */
static struct kmem_cpu_pool_type kmem_cpu_pool_types[] __read_mostly = {
    { 32768, 1,   0,           NULL },
    { 4096,  8,   CPU_L1_SIZE, NULL },
    { 256,   64,  CPU_L1_SIZE, NULL },
    { 0,     128, CPU_L1_SIZE, NULL }
};
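
/*
 * For example (illustrative, see the lookup loop in kmem_cache_init()):
 * with the table above, a cache whose buffers are 64 bytes falls in the
 * (0, 256] range and gets CPU pools of 128 objects, an 8 KiB cache falls
 * in (4096, 32768] and gets pools of 8 objects, and anything larger than
 * 32 KiB gets single-object pools.
 */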

/*
 * Caches where CPU pool arrays are allocated from.
 */
static struct kmem_cache kmem_cpu_array_caches[ARRAY_SIZE(kmem_cpu_pool_types)];

#endif /* KMEM_USE_CPU_LAYER */

/*
 * Cache for off slab data.
 */
static struct kmem_cache kmem_slab_cache;

/*
 * General caches array.
 */
static struct kmem_cache kmem_caches[KMEM_NR_MEM_CACHES];

/*
 * List of all caches managed by the allocator.
 */
static struct list kmem_cache_list;
static struct mutex kmem_cache_list_lock;

static void kmem_cache_error(struct kmem_cache *cache, void *buf, int error,
                             void *arg);
static void * kmem_cache_alloc_from_slab(struct kmem_cache *cache);
static void kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf);

static void *
kmem_buf_verify_bytes(void *buf, void *pattern, size_t size)
{
    char *ptr, *pattern_ptr, *end;

    end = buf + size;

    for (ptr = buf, pattern_ptr = pattern; ptr < end; ptr++, pattern_ptr++) {
        if (*ptr != *pattern_ptr) {
            return ptr;
        }
    }

    return NULL;
}

static void
kmem_buf_fill(void *buf, uint64_t pattern, size_t size)
{
    uint64_t *ptr, *end;

    assert(P2ALIGNED((uintptr_t)buf, sizeof(uint64_t)));
    assert(P2ALIGNED(size, sizeof(uint64_t)));

    end = buf + size;

    for (ptr = buf; ptr < end; ptr++) {
        *ptr = pattern;
    }
}

static void *
kmem_buf_verify_fill(void *buf, uint64_t old, uint64_t new, size_t size)
{
    uint64_t *ptr, *end;

    assert(P2ALIGNED((uintptr_t)buf, sizeof(uint64_t)));
    assert(P2ALIGNED(size, sizeof(uint64_t)));

    end = buf + size;

    for (ptr = buf; ptr < end; ptr++) {
        if (*ptr != old) {
            return kmem_buf_verify_bytes(ptr, &old, sizeof(old));
        }

        *ptr = new;
    }

    return NULL;
}

static inline union kmem_bufctl *
kmem_buf_to_bufctl(void *buf, struct kmem_cache *cache)
{
    return (union kmem_bufctl *)(buf + cache->bufctl_dist);
}

static inline struct kmem_buftag *
kmem_buf_to_buftag(void *buf, struct kmem_cache *cache)
{
    return (struct kmem_buftag *)(buf + cache->buftag_dist);
}

static inline void *
kmem_bufctl_to_buf(union kmem_bufctl *bufctl, struct kmem_cache *cache)
{
    return (void *)bufctl - cache->bufctl_dist;
}

static inline bool
kmem_pagealloc_is_virtual(size_t size)
{
    return (size > PAGE_SIZE);
}

static void *
kmem_pagealloc(size_t size)
{
    if (kmem_pagealloc_is_virtual(size)) {
        return vm_kmem_alloc(size);
    } else {
        struct vm_page *page;

        page = vm_page_alloc(vm_page_order(size), VM_PAGE_SEL_DIRECTMAP,
                             VM_PAGE_KMEM);

        if (page == NULL) {
            return NULL;
        }

        return vm_page_direct_ptr(page);
    }
}

static void
kmem_pagefree(void *ptr, size_t size)
{
    if (kmem_pagealloc_is_virtual(size)) {
        vm_kmem_free(ptr, size);
    } else {
        struct vm_page *page;

        page = vm_page_lookup(vm_page_direct_pa((uintptr_t)ptr));
        assert(page != NULL);
        vm_page_free(page, vm_page_order(size));
    }
}

static void
kmem_slab_create_verify(struct kmem_slab *slab, struct kmem_cache *cache)
{
    struct kmem_buftag *buftag;
    unsigned long buffers;
    size_t buf_size;
    void *buf;

    buf_size = cache->buf_size;
    buf = slab->addr;
    buftag = kmem_buf_to_buftag(buf, cache);

    for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
        kmem_buf_fill(buf, KMEM_FREE_PATTERN, cache->bufctl_dist);
        buftag->state = KMEM_BUFTAG_FREE;
        buf += buf_size;
        buftag = kmem_buf_to_buftag(buf, cache);
    }
}

/*
 * Create an empty slab for a cache.
 *
 * The caller must drop all locks before calling this function.
 */
static struct kmem_slab *
kmem_slab_create(struct kmem_cache *cache, size_t color)
{
    struct kmem_slab *slab;
    union kmem_bufctl *bufctl;
    size_t buf_size;
    unsigned long buffers;
    void *slab_buf;

    slab_buf = kmem_pagealloc(cache->slab_size);

    if (slab_buf == NULL) {
        return NULL;
    }

    if (cache->flags & KMEM_CF_SLAB_EXTERNAL) {
        slab = kmem_cache_alloc(&kmem_slab_cache);

        if (slab == NULL) {
            kmem_pagefree(slab_buf, cache->slab_size);
            return NULL;
        }
    } else {
        slab = (struct kmem_slab *)(slab_buf + cache->slab_size) - 1;
    }

    list_node_init(&slab->node);
    slab->nr_refs = 0;
    slab->first_free = NULL;
    slab->addr = slab_buf + color;

    buf_size = cache->buf_size;
    bufctl = kmem_buf_to_bufctl(slab->addr, cache);

    for (buffers = cache->bufs_per_slab; buffers != 0; buffers--) {
        bufctl->next = slab->first_free;
        slab->first_free = bufctl;
        bufctl = (union kmem_bufctl *)((void *)bufctl + buf_size);
    }

    if (cache->flags & KMEM_CF_VERIFY) {
        kmem_slab_create_verify(slab, cache);
    }

    return slab;
}
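
/*
 * Resulting slab layout (illustrative): for caches without the
 * KMEM_CF_SLAB_EXTERNAL flag, the slab buffer starts with an optional
 * coloring area, followed by the buffers themselves, with the
 * struct kmem_slab header embedded at the very end of the slab:
 *
 *     +-------+-------+-------+-----+-------+-------+------------------+
 *     | color | buf 0 | buf 1 | ... | buf N | waste | struct kmem_slab |
 *     +-------+-------+-------+-----+-------+-------+------------------+
 *
 * With KMEM_CF_SLAB_EXTERNAL, the header is instead allocated from
 * kmem_slab_cache and the whole slab buffer is available for buffers.
 */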

static inline uintptr_t
kmem_slab_buf(const struct kmem_slab *slab)
{
    return P2ALIGN((uintptr_t)slab->addr, PAGE_SIZE);
}

#ifdef KMEM_USE_CPU_LAYER

static void
kmem_cpu_pool_init(struct kmem_cpu_pool *cpu_pool, struct kmem_cache *cache)
{
    mutex_init(&cpu_pool->lock);
    cpu_pool->flags = cache->flags;
    cpu_pool->size = 0;
    cpu_pool->transfer_size = 0;
    cpu_pool->nr_objs = 0;
    cpu_pool->array = NULL;
}

static inline struct kmem_cpu_pool *
kmem_cpu_pool_get(struct kmem_cache *cache)
{
    return &cache->cpu_pools[cpu_id()];
}

static inline void
kmem_cpu_pool_build(struct kmem_cpu_pool *cpu_pool, struct kmem_cache *cache,
                    void **array)
{
    cpu_pool->size = cache->cpu_pool_type->array_size;
    cpu_pool->transfer_size = (cpu_pool->size
                               + KMEM_CPU_POOL_TRANSFER_RATIO - 1)
                              / KMEM_CPU_POOL_TRANSFER_RATIO;
    cpu_pool->array = array;
}

static inline void *
kmem_cpu_pool_pop(struct kmem_cpu_pool *cpu_pool)
{
    cpu_pool->nr_objs--;
    return cpu_pool->array[cpu_pool->nr_objs];
}

static inline void
kmem_cpu_pool_push(struct kmem_cpu_pool *cpu_pool, void *obj)
{
    cpu_pool->array[cpu_pool->nr_objs] = obj;
    cpu_pool->nr_objs++;
}

static int
kmem_cpu_pool_fill(struct kmem_cpu_pool *cpu_pool, struct kmem_cache *cache)
{
    kmem_ctor_fn_t ctor;
    void *buf;
    int i;

    ctor = (cpu_pool->flags & KMEM_CF_VERIFY) ? NULL : cache->ctor;

    mutex_lock(&cache->lock);

    for (i = 0; i < cpu_pool->transfer_size; i++) {
        buf = kmem_cache_alloc_from_slab(cache);

        if (buf == NULL) {
            break;
        }

        if (ctor != NULL) {
            ctor(buf);
        }

        kmem_cpu_pool_push(cpu_pool, buf);
    }

    mutex_unlock(&cache->lock);

    return i;
}

static void
kmem_cpu_pool_drain(struct kmem_cpu_pool *cpu_pool, struct kmem_cache *cache)
{
    void *obj;
    int i;

    mutex_lock(&cache->lock);

    for (i = cpu_pool->transfer_size; i > 0; i--) {
        obj = kmem_cpu_pool_pop(cpu_pool);
        kmem_cache_free_to_slab(cache, obj);
    }

    mutex_unlock(&cache->lock);
}

#endif /* KMEM_USE_CPU_LAYER */

static void
kmem_cache_error(struct kmem_cache *cache, void *buf, int error, void *arg)
{
    struct kmem_buftag *buftag;

    printf_ln("kmem: error: cache: %s, buffer: %p", cache->name, buf);

    switch (error) {
    case KMEM_ERR_INVALID:
        panic("kmem: freeing invalid address");
        break;
    case KMEM_ERR_DOUBLEFREE:
        panic("kmem: attempting to free the same address twice");
        break;
    case KMEM_ERR_BUFTAG:
        buftag = arg;
        panic("kmem: invalid buftag content, buftag state: %p",
              (void *)buftag->state);
        break;
    case KMEM_ERR_MODIFIED:
        panic("kmem: free buffer modified, fault address: %p, "
              "offset in buffer: %td", arg, arg - buf);
        break;
    case KMEM_ERR_REDZONE:
        panic("kmem: write beyond end of buffer, fault address: %p, "
              "offset in buffer: %td", arg, arg - buf);
        break;
    default:
        panic("kmem: unknown error");
    }

    /*
     * Never reached.
     */
}

/*
 * Compute properties such as slab size for the given cache.
 *
 * Once the slab size is known, this function sets the related properties
 * (buffers per slab and maximum color). It can also set some KMEM_CF_xxx
 * flags depending on the resulting layout.
 */
static void
kmem_cache_compute_properties(struct kmem_cache *cache, int flags)
{
    size_t size, waste;
    int embed;

    if (cache->buf_size < KMEM_BUF_SIZE_THRESHOLD) {
        flags |= KMEM_CACHE_NOOFFSLAB;
    }

    cache->slab_size = PAGE_SIZE;

    for (;;) {
        if (flags & KMEM_CACHE_NOOFFSLAB) {
            embed = 1;
        } else {
            waste = cache->slab_size % cache->buf_size;
            embed = (sizeof(struct kmem_slab) <= waste);
        }

        size = cache->slab_size;

        if (embed) {
            size -= sizeof(struct kmem_slab);
        }

        if (size >= cache->buf_size) {
            break;
        }

        cache->slab_size += PAGE_SIZE;
    }

    /*
     * A user may force page allocation in order to guarantee that virtual
     * memory isn't used. This is normally done for objects that are used
     * to implement virtual memory and avoid circular dependencies.
     *
     * When forcing the use of direct page allocation, only allow single
     * page allocations in order to completely prevent physical memory
     * fragmentation from making slab allocations fail.
     */
    if ((flags & KMEM_CACHE_PAGE_ONLY) && (cache->slab_size != PAGE_SIZE)) {
        panic("kmem: unable to guarantee page allocation");
    }

    cache->bufs_per_slab = size / cache->buf_size;
    cache->color_max = size % cache->buf_size;

    /*
     * Make sure the first page of a slab buffer can be found from the
     * address of the first object.
     *
     * See kmem_slab_buf().
     */
    if (cache->color_max >= PAGE_SIZE) {
        cache->color_max = 0;
    }

    if (!embed) {
        cache->flags |= KMEM_CF_SLAB_EXTERNAL;
    }
}
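
/*
 * Worked example (illustrative, assuming 4 KiB pages and a kmem_slab
 * header of roughly 40 bytes): a cache with 128-byte buffers is below
 * KMEM_BUF_SIZE_THRESHOLD, so the header is embedded. The usable size is
 * then 4096 - 40 = 4056 bytes, giving 31 buffers per slab and 88 bytes of
 * leftover space available for cache coloring.
 */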

void
kmem_cache_init(struct kmem_cache *cache, const char *name, size_t obj_size,
                size_t align, kmem_ctor_fn_t ctor, int flags)
{
    size_t buf_size;

#ifdef CONFIG_KMEM_DEBUG
    cache->flags = KMEM_CF_VERIFY;
#else /* CONFIG_KMEM_DEBUG */
    cache->flags = 0;
#endif /* CONFIG_KMEM_DEBUG */

    if (flags & KMEM_CACHE_VERIFY) {
        cache->flags |= KMEM_CF_VERIFY;
    }

    if (align < KMEM_ALIGN_MIN) {
        align = KMEM_ALIGN_MIN;
    }

    assert(obj_size > 0);
    assert(ISP2(align));
    assert(align < PAGE_SIZE);

    buf_size = P2ROUND(obj_size, align);

    mutex_init(&cache->lock);
    list_node_init(&cache->node);
    list_init(&cache->partial_slabs);
    list_init(&cache->free_slabs);
    cache->obj_size = obj_size;
    cache->align = align;
    cache->buf_size = buf_size;
    cache->bufctl_dist = buf_size - sizeof(union kmem_bufctl);
    cache->color = 0;
    cache->nr_objs = 0;
    cache->nr_bufs = 0;
    cache->nr_slabs = 0;
    cache->nr_free_slabs = 0;
    cache->ctor = ctor;
    strlcpy(cache->name, name, sizeof(cache->name));
    cache->buftag_dist = 0;
    cache->redzone_pad = 0;

    if (cache->flags & KMEM_CF_VERIFY) {
        cache->bufctl_dist = buf_size;
        cache->buftag_dist = cache->bufctl_dist + sizeof(union kmem_bufctl);
        cache->redzone_pad = cache->bufctl_dist - cache->obj_size;
        buf_size += sizeof(union kmem_bufctl) + sizeof(struct kmem_buftag);
        buf_size = P2ROUND(buf_size, align);
        cache->buf_size = buf_size;
    }

    kmem_cache_compute_properties(cache, flags);

#ifdef KMEM_USE_CPU_LAYER
    for (cache->cpu_pool_type = kmem_cpu_pool_types;
         buf_size <= cache->cpu_pool_type->buf_size;
         cache->cpu_pool_type++);

    for (size_t i = 0; i < ARRAY_SIZE(cache->cpu_pools); i++) {
        kmem_cpu_pool_init(&cache->cpu_pools[i], cache);
    }
#endif /* KMEM_USE_CPU_LAYER */

    mutex_lock(&kmem_cache_list_lock);
    list_insert_tail(&kmem_cache_list, &cache->node);
    mutex_unlock(&kmem_cache_list_lock);
}

static inline int
kmem_cache_empty(struct kmem_cache *cache)
{
    return cache->nr_objs == cache->nr_bufs;
}

static struct kmem_slab *
kmem_cache_buf_to_slab(const struct kmem_cache *cache, void *buf)
{
    if ((cache->flags & KMEM_CF_SLAB_EXTERNAL)
        || (cache->slab_size != PAGE_SIZE)) {
        return NULL;
    }

    return (struct kmem_slab *)vm_page_end((uintptr_t)buf) - 1;
}
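
/*
 * Note (illustrative): this is the fast, constant time buffer-to-slab
 * conversion mentioned in the header comment. For page-sized slabs with an
 * embedded header, the struct kmem_slab lives at the end of the page that
 * contains the buffer, so the end of that page is enough to find it. All
 * other configurations go through kmem_cache_lookup(), which retrieves the
 * slab from the private data of the underlying vm_page.
 */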

static inline bool
kmem_cache_registration_required(const struct kmem_cache *cache)
{
    return ((cache->flags & KMEM_CF_SLAB_EXTERNAL)
            || (cache->flags & KMEM_CF_VERIFY)
            || (cache->slab_size != PAGE_SIZE));
}

static void
kmem_cache_register(struct kmem_cache *cache, struct kmem_slab *slab)
{
    struct vm_page *page;
    uintptr_t va, end;
    phys_addr_t pa;
    bool virtual;
    int error;

    assert(kmem_cache_registration_required(cache));
    assert(slab->nr_refs == 0);

    virtual = kmem_pagealloc_is_virtual(cache->slab_size);

    for (va = kmem_slab_buf(slab), end = va + cache->slab_size;
         va < end;
         va += PAGE_SIZE) {
        if (virtual) {
            error = pmap_kextract(va, &pa);
            assert(!error);
        } else {
            pa = vm_page_direct_pa(va);
        }

        page = vm_page_lookup(pa);
        assert(page != NULL);
        assert((virtual && vm_page_type(page) == VM_PAGE_KERNEL)
               || (!virtual && vm_page_type(page) == VM_PAGE_KMEM));
        assert(vm_page_get_priv(page) == NULL);
        vm_page_set_priv(page, slab);
    }
}

static struct kmem_slab *
kmem_cache_lookup(struct kmem_cache *cache, void *buf)
{
    struct kmem_slab *slab;
    struct vm_page *page;
    uintptr_t va;
    phys_addr_t pa;
    bool virtual;
    int error;

    assert(kmem_cache_registration_required(cache));

    virtual = kmem_pagealloc_is_virtual(cache->slab_size);
    va = (uintptr_t)buf;

    if (virtual) {
        error = pmap_kextract(va, &pa);

        if (error) {
            return NULL;
        }
    } else {
        pa = vm_page_direct_pa(va);
    }

    page = vm_page_lookup(pa);

    if (page == NULL) {
        return NULL;
    }

    if ((virtual && (vm_page_type(page) != VM_PAGE_KERNEL))
        || (!virtual && (vm_page_type(page) != VM_PAGE_KMEM))) {
        return NULL;
    }

    slab = vm_page_get_priv(page);
    assert((uintptr_t)buf >= kmem_slab_buf(slab));
    assert((uintptr_t)buf < (kmem_slab_buf(slab) + cache->slab_size));
    return slab;
}

static int
kmem_cache_grow(struct kmem_cache *cache)
{
    struct kmem_slab *slab;
    size_t color;
    int empty;

    mutex_lock(&cache->lock);

    if (!kmem_cache_empty(cache)) {
        mutex_unlock(&cache->lock);
        return 1;
    }

    color = cache->color;
    cache->color += cache->align;

    if (cache->color > cache->color_max) {
        cache->color = 0;
    }

    mutex_unlock(&cache->lock);

    slab = kmem_slab_create(cache, color);

    mutex_lock(&cache->lock);

    if (slab != NULL) {
        list_insert_head(&cache->free_slabs, &slab->node);
        cache->nr_bufs += cache->bufs_per_slab;
        cache->nr_slabs++;
        cache->nr_free_slabs++;

        if (kmem_cache_registration_required(cache)) {
            kmem_cache_register(cache, slab);
        }
    }

    /*
     * Even if our slab creation failed, another thread might have succeeded
     * in growing the cache.
     */
    empty = kmem_cache_empty(cache);

    mutex_unlock(&cache->lock);

    return !empty;
}

/*
 * Allocate a raw (unconstructed) buffer from the slab layer of a cache.
 *
 * The cache must be locked before calling this function.
 */
static void *
kmem_cache_alloc_from_slab(struct kmem_cache *cache)
{
    struct kmem_slab *slab;
    union kmem_bufctl *bufctl;

    if (!list_empty(&cache->partial_slabs)) {
        slab = list_first_entry(&cache->partial_slabs, struct kmem_slab, node);
    } else if (!list_empty(&cache->free_slabs)) {
        slab = list_first_entry(&cache->free_slabs, struct kmem_slab, node);
    } else {
        return NULL;
    }

    bufctl = slab->first_free;
    assert(bufctl != NULL);
    slab->first_free = bufctl->next;
    slab->nr_refs++;
    cache->nr_objs++;

    if (slab->nr_refs == cache->bufs_per_slab) {
        /* The slab has become complete */
        list_remove(&slab->node);

        if (slab->nr_refs == 1) {
            cache->nr_free_slabs--;
        }
    } else if (slab->nr_refs == 1) {
        /*
         * The slab has become partial. Insert the new slab at the end of
         * the list to reduce fragmentation.
         */
        list_remove(&slab->node);
        list_insert_tail(&cache->partial_slabs, &slab->node);
        cache->nr_free_slabs--;
    }

    return kmem_bufctl_to_buf(bufctl, cache);
}

/*
 * Release a buffer to the slab layer of a cache.
 *
 * The cache must be locked before calling this function.
 */
static void
kmem_cache_free_to_slab(struct kmem_cache *cache, void *buf)
{
    struct kmem_slab *slab;
    union kmem_bufctl *bufctl;

    slab = kmem_cache_buf_to_slab(cache, buf);

    if (slab == NULL) {
        slab = kmem_cache_lookup(cache, buf);
        assert(slab != NULL);
    }

    assert(slab->nr_refs >= 1);
    assert(slab->nr_refs <= cache->bufs_per_slab);
    bufctl = kmem_buf_to_bufctl(buf, cache);
    bufctl->next = slab->first_free;
    slab->first_free = bufctl;
    slab->nr_refs--;
    cache->nr_objs--;

    if (slab->nr_refs == 0) {
        /* The slab has become free */

        /* If it was partial, remove it from its list */
        if (cache->bufs_per_slab != 1) {
            list_remove(&slab->node);
        }

        list_insert_head(&cache->free_slabs, &slab->node);
        cache->nr_free_slabs++;
    } else if (slab->nr_refs == (cache->bufs_per_slab - 1)) {
        /* The slab has become partial */
        list_insert_head(&cache->partial_slabs, &slab->node);
    }
}
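
/*
 * Summary of slab state transitions (illustrative): a slab is "free" when
 * none of its buffers are allocated, "partial" when some are, and
 * "complete" when all of them are. Free and partial slabs sit on the
 * cache's free_slabs and partial_slabs lists respectively; complete slabs
 * are kept off both lists and are only reached again through the
 * buffer-to-slab conversion when one of their buffers is freed.
 */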

static void
kmem_cache_alloc_verify(struct kmem_cache *cache, void *buf, int construct)
{
    struct kmem_buftag *buftag;
    union kmem_bufctl *bufctl;
    void *addr;

    buftag = kmem_buf_to_buftag(buf, cache);

    if (buftag->state != KMEM_BUFTAG_FREE) {
        kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
    }

    addr = kmem_buf_verify_fill(buf, KMEM_FREE_PATTERN, KMEM_UNINIT_PATTERN,
                                cache->bufctl_dist);

    if (addr != NULL) {
        kmem_cache_error(cache, buf, KMEM_ERR_MODIFIED, addr);
    }

    addr = buf + cache->obj_size;
    memset(addr, KMEM_REDZONE_BYTE, cache->redzone_pad);

    bufctl = kmem_buf_to_bufctl(buf, cache);
    bufctl->redzone = KMEM_REDZONE_WORD;
    buftag->state = KMEM_BUFTAG_ALLOC;

    if (construct && (cache->ctor != NULL)) {
        cache->ctor(buf);
    }
}

void *
kmem_cache_alloc(struct kmem_cache *cache)
{
    bool filled;
    void *buf;

#ifdef KMEM_USE_CPU_LAYER
    struct kmem_cpu_pool *cpu_pool;

    thread_pin();
    cpu_pool = kmem_cpu_pool_get(cache);

    mutex_lock(&cpu_pool->lock);

fast_alloc:
    if (likely(cpu_pool->nr_objs > 0)) {
        bool verify;

        buf = kmem_cpu_pool_pop(cpu_pool);
        verify = (cpu_pool->flags & KMEM_CF_VERIFY);
        mutex_unlock(&cpu_pool->lock);
        thread_unpin();

        if (verify) {
            kmem_cache_alloc_verify(cache, buf, KMEM_AV_CONSTRUCT);
        }

        return buf;
    }

    if (cpu_pool->array != NULL) {
        filled = kmem_cpu_pool_fill(cpu_pool, cache);

        if (!filled) {
            mutex_unlock(&cpu_pool->lock);
            thread_unpin();

            filled = kmem_cache_grow(cache);

            if (!filled) {
                return NULL;
            }

            thread_pin();
            cpu_pool = kmem_cpu_pool_get(cache);
            mutex_lock(&cpu_pool->lock);
        }

        goto fast_alloc;
    }

    mutex_unlock(&cpu_pool->lock);
    thread_unpin();
#endif /* KMEM_USE_CPU_LAYER */

slab_alloc:
    mutex_lock(&cache->lock);
    buf = kmem_cache_alloc_from_slab(cache);
    mutex_unlock(&cache->lock);

    if (buf == NULL) {
        filled = kmem_cache_grow(cache);

        if (!filled) {
            return NULL;
        }

        goto slab_alloc;
    }

    if (cache->flags & KMEM_CF_VERIFY) {
        kmem_cache_alloc_verify(cache, buf, KMEM_AV_NOCONSTRUCT);
    }

    if (cache->ctor != NULL) {
        cache->ctor(buf);
    }

    return buf;
}

static void
kmem_cache_free_verify(struct kmem_cache *cache, void *buf)
{
    struct kmem_buftag *buftag;
    struct kmem_slab *slab;
    union kmem_bufctl *bufctl;
    unsigned char *redzone_byte;
    uintptr_t slabend;

    slab = kmem_cache_lookup(cache, buf);

    if (slab == NULL) {
        kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
    }

    slabend = P2ALIGN((uintptr_t)slab->addr + cache->slab_size, PAGE_SIZE);

    if ((uintptr_t)buf >= slabend) {
        kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
    }

    if ((((uintptr_t)buf - (uintptr_t)slab->addr) % cache->buf_size)
        != 0) {
        kmem_cache_error(cache, buf, KMEM_ERR_INVALID, NULL);
    }

    /*
     * As the buffer address is valid, accessing its buftag is safe.
     */
    buftag = kmem_buf_to_buftag(buf, cache);

    if (buftag->state != KMEM_BUFTAG_ALLOC) {
        if (buftag->state == KMEM_BUFTAG_FREE) {
            kmem_cache_error(cache, buf, KMEM_ERR_DOUBLEFREE, NULL);
        } else {
            kmem_cache_error(cache, buf, KMEM_ERR_BUFTAG, buftag);
        }
    }

    redzone_byte = buf + cache->obj_size;
    bufctl = kmem_buf_to_bufctl(buf, cache);

    while (redzone_byte < (unsigned char *)bufctl) {
        if (*redzone_byte != KMEM_REDZONE_BYTE) {
            kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);
        }

        redzone_byte++;
    }

    if (bufctl->redzone != KMEM_REDZONE_WORD) {
        unsigned long word;

        word = KMEM_REDZONE_WORD;
        redzone_byte = kmem_buf_verify_bytes(&bufctl->redzone, &word,
                                             sizeof(bufctl->redzone));
        kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);
    }

    kmem_buf_fill(buf, KMEM_FREE_PATTERN, cache->bufctl_dist);
    buftag->state = KMEM_BUFTAG_FREE;
}

void
kmem_cache_free(struct kmem_cache *cache, void *obj)
{
#ifdef KMEM_USE_CPU_LAYER
    struct kmem_cpu_pool *cpu_pool;
    void **array;

    thread_pin();
    cpu_pool = kmem_cpu_pool_get(cache);

    if (cpu_pool->flags & KMEM_CF_VERIFY) {
        thread_unpin();

        kmem_cache_free_verify(cache, obj);

        thread_pin();
        cpu_pool = kmem_cpu_pool_get(cache);
    }

    mutex_lock(&cpu_pool->lock);

fast_free:
    if (likely(cpu_pool->nr_objs < cpu_pool->size)) {
        kmem_cpu_pool_push(cpu_pool, obj);
        mutex_unlock(&cpu_pool->lock);
        thread_unpin();
        return;
    }

    if (cpu_pool->array != NULL) {
        kmem_cpu_pool_drain(cpu_pool, cache);
        goto fast_free;
    }

    mutex_unlock(&cpu_pool->lock);

    array = kmem_cache_alloc(cache->cpu_pool_type->array_cache);

    if (array != NULL) {
        mutex_lock(&cpu_pool->lock);

        /*
         * Another thread may have built the CPU pool while the lock was
         * dropped.
         */
        if (cpu_pool->array != NULL) {
            mutex_unlock(&cpu_pool->lock);
            thread_unpin();

            kmem_cache_free(cache->cpu_pool_type->array_cache, array);

            thread_pin();
            cpu_pool = kmem_cpu_pool_get(cache);
            mutex_lock(&cpu_pool->lock);
            goto fast_free;
        }

        kmem_cpu_pool_build(cpu_pool, cache, array);
        goto fast_free;
    }

    thread_unpin();
#else /* KMEM_USE_CPU_LAYER */
    if (cache->flags & KMEM_CF_VERIFY) {
        kmem_cache_free_verify(cache, obj);
    }
#endif /* KMEM_USE_CPU_LAYER */

    mutex_lock(&cache->lock);
    kmem_cache_free_to_slab(cache, obj);
    mutex_unlock(&cache->lock);
}

void
kmem_cache_info(struct kmem_cache *cache, log_print_fn_t print_fn)
{
    char flags_str[64];

    snprintf(flags_str, sizeof(flags_str), "%s%s",
             (cache->flags & KMEM_CF_SLAB_EXTERNAL) ? " SLAB_EXTERNAL" : "",
             (cache->flags & KMEM_CF_VERIFY) ? " VERIFY" : "");

    mutex_lock(&cache->lock);

    print_fn("kmem: flags: 0x%x%s", cache->flags, flags_str);
    print_fn("kmem: obj_size: %zu", cache->obj_size);
    print_fn("kmem: align: %zu", cache->align);
    print_fn("kmem: buf_size: %zu", cache->buf_size);
    print_fn("kmem: bufctl_dist: %zu", cache->bufctl_dist);
    print_fn("kmem: slab_size: %zu", cache->slab_size);
    print_fn("kmem: color_max: %zu", cache->color_max);
    print_fn("kmem: bufs_per_slab: %lu", cache->bufs_per_slab);
    print_fn("kmem: nr_objs: %lu", cache->nr_objs);
    print_fn("kmem: nr_bufs: %lu", cache->nr_bufs);
    print_fn("kmem: nr_slabs: %lu", cache->nr_slabs);
    print_fn("kmem: nr_free_slabs: %lu", cache->nr_free_slabs);
    print_fn("kmem: buftag_dist: %zu", cache->buftag_dist);
    print_fn("kmem: redzone_pad: %zu", cache->redzone_pad);
#ifdef KMEM_USE_CPU_LAYER
    print_fn("kmem: cpu_pool_size: %d", cache->cpu_pool_type->array_size);
#endif /* KMEM_USE_CPU_LAYER */

    mutex_unlock(&cache->lock);
}

#ifdef CONFIG_SHELL

static struct kmem_cache *
kmem_lookup_cache(const char *name)
{
    struct kmem_cache *cache;

    mutex_lock(&kmem_cache_list_lock);

    list_for_each_entry(&kmem_cache_list, cache, node) {
        if (strcmp(cache->name, name) == 0) {
            goto out;
        }
    }

    cache = NULL;

out:
    mutex_unlock(&kmem_cache_list_lock);

    return cache;
}

static void
kmem_shell_info(struct shell *shell, int argc, char **argv)
{
    struct kmem_cache *cache;

    (void)shell;

    if (argc < 2) {
        kmem_info(printf_ln);
    } else {
        cache = kmem_lookup_cache(argv[1]);

        if (cache == NULL) {
            printf_ln("kmem: info: invalid argument");
            return;
        }

        kmem_cache_info(cache, printf_ln);
    }
}

static struct shell_cmd kmem_shell_cmds[] = {
    SHELL_CMD_INITIALIZER("kmem_info", kmem_shell_info,
                          "kmem_info [<cache_name>]",
                          "display information about kernel memory and caches"),
};

static int __init
kmem_setup_shell(void)
{
    SHELL_REGISTER_CMDS(kmem_shell_cmds, shell_get_main_cmd_set());
    return 0;
}

INIT_OP_DEFINE(kmem_setup_shell,
               INIT_OP_DEP(kmem_setup, true),
               INIT_OP_DEP(printf_setup, true),
               INIT_OP_DEP(shell_setup, true),
               INIT_OP_DEP(thread_setup, true));

#endif /* CONFIG_SHELL */

#ifdef KMEM_USE_CPU_LAYER
static void
kmem_bootstrap_cpu(void)
{
    struct kmem_cpu_pool_type *cpu_pool_type;
    char name[KMEM_NAME_SIZE];
    size_t size;

    for (size_t i = 0; i < ARRAY_SIZE(kmem_cpu_pool_types); i++) {
        cpu_pool_type = &kmem_cpu_pool_types[i];
        cpu_pool_type->array_cache = &kmem_cpu_array_caches[i];
        sprintf(name, "kmem_cpu_array_%d", cpu_pool_type->array_size);
        size = sizeof(void *) * cpu_pool_type->array_size;
        kmem_cache_init(cpu_pool_type->array_cache, name, size,
                        cpu_pool_type->array_align, NULL, 0);
    }
}
#endif /* KMEM_USE_CPU_LAYER */

static int __init
kmem_bootstrap(void)
{
    char name[KMEM_NAME_SIZE];
    size_t size;

    /* Make sure a bufctl can always be stored in a buffer */
    assert(sizeof(union kmem_bufctl) <= KMEM_ALIGN_MIN);

    list_init(&kmem_cache_list);
    mutex_init(&kmem_cache_list_lock);

#ifdef KMEM_USE_CPU_LAYER
    kmem_bootstrap_cpu();
#endif /* KMEM_USE_CPU_LAYER */

    /*
     * Prevent off slab data for the slab cache to avoid infinite recursion.
     */
    kmem_cache_init(&kmem_slab_cache, "kmem_slab", sizeof(struct kmem_slab),
                    0, NULL, KMEM_CACHE_NOOFFSLAB);

    size = 1 << KMEM_CACHES_FIRST_ORDER;

    for (size_t i = 0; i < ARRAY_SIZE(kmem_caches); i++) {
        sprintf(name, "kmem_%zu", size);
        kmem_cache_init(&kmem_caches[i], name, size, 0, NULL, 0);
        size <<= 1;
    }

    return 0;
}

INIT_OP_DEFINE(kmem_bootstrap,
               INIT_OP_DEP(thread_bootstrap, true),
               INIT_OP_DEP(vm_page_setup, true));

static int __init
kmem_setup(void)
{
    return 0;
}

INIT_OP_DEFINE(kmem_setup,
               INIT_OP_DEP(kmem_bootstrap, true),
               INIT_OP_DEP(vm_kmem_setup, true));

static inline size_t
kmem_get_index(unsigned long size)
{
    return log2_order(size) - KMEM_CACHES_FIRST_ORDER;
}
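
/*
 * For example (illustrative, assuming log2_order() rounds up to the next
 * power of two): a 100-byte request has order 7, so its index is
 * 7 - 5 = 2, which selects the "kmem_128" general purpose cache.
 */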

static void
kmem_alloc_verify(struct kmem_cache *cache, void *buf, size_t size)
{
    size_t redzone_size;
    void *redzone;

    assert(size <= cache->obj_size);

    redzone = buf + size;
    redzone_size = cache->obj_size - size;
    memset(redzone, KMEM_REDZONE_BYTE, redzone_size);
}

void *
kmem_alloc(size_t size)
{
    size_t index;
    void *buf;

    if (size == 0) {
        return NULL;
    }

    index = kmem_get_index(size);

    if (index < ARRAY_SIZE(kmem_caches)) {
        struct kmem_cache *cache;

        cache = &kmem_caches[index];
        buf = kmem_cache_alloc(cache);

        if ((buf != NULL) && (cache->flags & KMEM_CF_VERIFY)) {
            kmem_alloc_verify(cache, buf, size);
        }
    } else {
        buf = kmem_pagealloc(size);
    }

    return buf;
}

void *
kmem_zalloc(size_t size)
{
    void *ptr;

    ptr = kmem_alloc(size);

    if (ptr == NULL) {
        return NULL;
    }

    memset(ptr, 0, size);
    return ptr;
}

static void
kmem_free_verify(struct kmem_cache *cache, void *buf, size_t size)
{
    unsigned char *redzone_byte, *redzone_end;

    assert(size <= cache->obj_size);

    redzone_byte = buf + size;
    redzone_end = buf + cache->obj_size;

    while (redzone_byte < redzone_end) {
        if (*redzone_byte != KMEM_REDZONE_BYTE) {
            kmem_cache_error(cache, buf, KMEM_ERR_REDZONE, redzone_byte);
        }

        redzone_byte++;
    }
}

void
kmem_free(void *ptr, size_t size)
{
    size_t index;

    if ((ptr == NULL) || (size == 0)) {
        return;
    }

    index = kmem_get_index(size);

    if (index < ARRAY_SIZE(kmem_caches)) {
        struct kmem_cache *cache;

        cache = &kmem_caches[index];

        if (cache->flags & KMEM_CF_VERIFY) {
            kmem_free_verify(cache, ptr, size);
        }

        kmem_cache_free(cache, ptr);
    } else {
        kmem_pagefree(ptr, size);
    }
}
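
/*
 * Illustrative usage sketch of the general purpose interface: unlike
 * malloc()/free(), the caller must pass the original allocation size back
 * to kmem_free(), since the allocator does not track it. The struct name
 * below is hypothetical:
 *
 *     struct bar *bar;
 *
 *     bar = kmem_zalloc(sizeof(*bar));
 *
 *     if (bar == NULL) {
 *         return NULL;
 *     }
 *
 *     ...
 *
 *     kmem_free(bar, sizeof(*bar));
 */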

void
kmem_info(log_print_fn_t print_fn)
{
    size_t total_reclaim, total_reclaim_physical, total_reclaim_virtual;
    size_t total, total_physical, total_virtual;
    size_t mem_usage, mem_reclaim;
    struct kmem_cache *cache;

    total = 0;
    total_physical = 0;
    total_virtual = 0;
    total_reclaim = 0;
    total_reclaim_physical = 0;
    total_reclaim_virtual = 0;

    print_fn("kmem: cache obj slab bufs objs bufs "
             " total reclaimable");
    print_fn("kmem: name size size /slab usage count "
             " memory memory");

    mutex_lock(&kmem_cache_list_lock);

    list_for_each_entry(&kmem_cache_list, cache, node) {
        mutex_lock(&cache->lock);

        mem_usage = (cache->nr_slabs * cache->slab_size) >> 10;
        mem_reclaim = (cache->nr_free_slabs * cache->slab_size) >> 10;
        total += mem_usage;
        total_reclaim += mem_reclaim;

        if (kmem_pagealloc_is_virtual(cache->slab_size)) {
            total_virtual += mem_usage;
            total_reclaim_virtual += mem_reclaim;
        } else {
            total_physical += mem_usage;
            total_reclaim_physical += mem_reclaim;
        }

        print_fn("kmem: %-19s %6zu %3zuk %4lu %6lu %6lu %7zuk %10zuk",
                 cache->name, cache->obj_size, cache->slab_size >> 10,
                 cache->bufs_per_slab, cache->nr_objs, cache->nr_bufs,
                 mem_usage, mem_reclaim);

        mutex_unlock(&cache->lock);
    }

    mutex_unlock(&kmem_cache_list_lock);

    print_fn("total: %zuk (phys: %zuk virt: %zuk), "
             "reclaim: %zuk (phys: %zuk virt: %zuk)",
             total, total_physical, total_virtual,
             total_reclaim, total_reclaim_physical, total_reclaim_virtual);
}