/* linux.c — userspace stand-in for kernel slab/kmalloc allocators (test shim) */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <stdlib.h>
  3. #include <string.h>
  4. #include <malloc.h>
  5. #include <pthread.h>
  6. #include <unistd.h>
  7. #include <assert.h>
  8. #include <linux/gfp.h>
  9. #include <linux/poison.h>
  10. #include <linux/slab.h>
  11. #include <linux/radix-tree.h>
  12. #include <urcu/uatomic.h>
/* Count of objects currently live via this shim (slab + kmalloc paths). */
int nr_allocated;
/* NOTE(review): presumably mirrors the kernel's preempt counter for the
 * test harness; it is not referenced in this chunk — confirm with callers. */
int preempt_count;
/* Nonzero => print a line to stdout for every allocation and free. */
int kmalloc_verbose;
/* Test-harness verbosity flag; not referenced in this chunk. */
int test_verbose;
/*
 * Minimal userspace stand-in for the kernel's struct kmem_cache.
 * Keeps a small free list of previously released objects, linked
 * through radix_tree_node->parent, so tests exercise object reuse.
 */
struct kmem_cache {
	pthread_mutex_t lock;	/* protects nr_objs and objs */
	int size;		/* object size in bytes, fixed at create time */
	int nr_objs;		/* number of objects held on the free list */
	void *objs;		/* free-list head (chained via ->parent) */
	void (*ctor)(void *);	/* optional constructor, run on fresh objects only */
};
  24. void *kmem_cache_alloc(struct kmem_cache *cachep, int flags)
  25. {
  26. struct radix_tree_node *node;
  27. if (!(flags & __GFP_DIRECT_RECLAIM))
  28. return NULL;
  29. pthread_mutex_lock(&cachep->lock);
  30. if (cachep->nr_objs) {
  31. cachep->nr_objs--;
  32. node = cachep->objs;
  33. cachep->objs = node->parent;
  34. pthread_mutex_unlock(&cachep->lock);
  35. node->parent = NULL;
  36. } else {
  37. pthread_mutex_unlock(&cachep->lock);
  38. node = malloc(cachep->size);
  39. if (cachep->ctor)
  40. cachep->ctor(node);
  41. }
  42. uatomic_inc(&nr_allocated);
  43. if (kmalloc_verbose)
  44. printf("Allocating %p from slab\n", node);
  45. return node;
  46. }
  47. void kmem_cache_free(struct kmem_cache *cachep, void *objp)
  48. {
  49. assert(objp);
  50. uatomic_dec(&nr_allocated);
  51. if (kmalloc_verbose)
  52. printf("Freeing %p to slab\n", objp);
  53. pthread_mutex_lock(&cachep->lock);
  54. if (cachep->nr_objs > 10) {
  55. memset(objp, POISON_FREE, cachep->size);
  56. free(objp);
  57. } else {
  58. struct radix_tree_node *node = objp;
  59. cachep->nr_objs++;
  60. node->parent = cachep->objs;
  61. cachep->objs = node;
  62. }
  63. pthread_mutex_unlock(&cachep->lock);
  64. }
  65. void *kmalloc(size_t size, gfp_t gfp)
  66. {
  67. void *ret;
  68. if (!(gfp & __GFP_DIRECT_RECLAIM))
  69. return NULL;
  70. ret = malloc(size);
  71. uatomic_inc(&nr_allocated);
  72. if (kmalloc_verbose)
  73. printf("Allocating %p from malloc\n", ret);
  74. if (gfp & __GFP_ZERO)
  75. memset(ret, 0, size);
  76. return ret;
  77. }
  78. void kfree(void *p)
  79. {
  80. if (!p)
  81. return;
  82. uatomic_dec(&nr_allocated);
  83. if (kmalloc_verbose)
  84. printf("Freeing %p to malloc\n", p);
  85. free(p);
  86. }
  87. struct kmem_cache *
  88. kmem_cache_create(const char *name, size_t size, size_t offset,
  89. unsigned long flags, void (*ctor)(void *))
  90. {
  91. struct kmem_cache *ret = malloc(sizeof(*ret));
  92. pthread_mutex_init(&ret->lock, NULL);
  93. ret->size = size;
  94. ret->nr_objs = 0;
  95. ret->objs = NULL;
  96. ret->ctor = ctor;
  97. return ret;
  98. }