#ifndef __EACSMB_mempool_h__
#define __EACSMB_mempool_h__

#include <stdlib.h>
#include <stdint.h>


// MemPool is a FAST, non-thread-safe allocator for fixed-size objects.
// There are no system calls during allocation or freeing, though the kernel may
// take a page fault when a new page of virtual memory is written to for the first time.
//
// Does not:
//   Handle double free()s. Only call free once.
//   Handle threads. Manage synchronization yourself.
//   Grow the pool dynamically. Request enough space from the start.
//   Reserve physical memory with the OS. It's purely virtual until you use it.
//   Check if the pointer you feed to free belongs to this pool. Be careful.
typedef struct MemPool {
	size_t itemSize;
	size_t maxItems;
	size_t fill;
	size_t highestUsed;
	size_t firstFree;
	
	void* pool;
} MemPool;
// DO NOT USE THIS:
// these allocate the pool itself
MemPool* MemPool_alloc(size_t itemSize, size_t maxItems);
void MemPool_init(MemPool* mp, size_t itemSize, size_t maxItems);

// USE THIS:
// these allocate chunks of memory from the pool
void* MemPool_malloc(MemPool* mp);
void* MemPool_calloc(MemPool* mp);
void MemPool_free(MemPool* mp, void* ptr);
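
// Example usage (a sketch; the `Bullet` type and the pool size are illustrative,
// not part of this API):
//
//     typedef struct Bullet { float x, y, vx, vy; } Bullet;
//
//     MemPool bullets;
//     MemPool_init(&bullets, sizeof(Bullet), 64 * 1024);
//
//     Bullet* b = MemPool_calloc(&bullets); // zeroed slot from the pool
//     b->x = 1.0f;
//
//     MemPool_free(&bullets, b); // return the slot; free each pointer exactly once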
// -----------------------------------------------------------------
//
// Tracked MemPool includes a bitfield tracking usage, making it iterable and double-free safe.
//
// Does:
//   Handle double free()s. Call free as much as you want.
// Does not:
//   Handle threads. Manage synchronization yourself.
//   Grow the pool dynamically. Request enough space from the start.
//   Reserve physical memory with the OS. It's purely virtual until you use it.
//   Check if the pointer you feed to free belongs to this pool. Be careful.
typedef struct MemPoolT /* tracked */ {
	size_t itemSize;
	size_t maxItems;
	size_t fill;
	size_t highestUsed;
	size_t firstFree;
	
	size_t bitfieldAlloc;
	uint64_t* bitfield;
	
	void* pool;
} MemPoolT;
// DO NOT USE THIS:
// these allocate the pool itself
MemPoolT* MemPoolT_alloc(size_t itemSize, size_t maxItems);
void MemPoolT_init(MemPoolT* mp, size_t itemSize, size_t maxItems);

// USE THIS:
// these allocate chunks of memory from the pool
void* MemPoolT_malloc(MemPoolT* mp);
void MemPoolT_free(MemPoolT* mp, void* ptr);

// iteration helpers; indices are 0-based
int MemPoolT_isSlotUsed(MemPoolT* mp, size_t index);
void* MemPoolT_getNextUsedIndex(MemPoolT* mp, size_t* index);
int MemPoolT_ownsPointer(MemPoolT* mp, void* ptr);
static inline size_t MemPoolT_maxIndex(MemPoolT* mp) {
	return mp->highestUsed == 0 ? 0 : mp->highestUsed - 1;
}

static inline void* MemPoolT_getIndex(MemPoolT* mp, size_t index) {
	return mp->pool + (index * mp->itemSize);
}

// garbage in, garbage out. you have been warned.
static inline size_t MemPoolT_indexOf(MemPoolT* mp, void* ptr) {
	return (ptr - mp->pool) / mp->itemSize;
}
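
// Example usage (a sketch; `Entity` is illustrative, and the loop below assumes
// MemPoolT_maxIndex() returns the highest slot index ever handed out):
//
//     typedef struct Entity { int id; } Entity;
//
//     MemPoolT entities;
//     MemPoolT_init(&entities, sizeof(Entity), 4096);
//
//     Entity* e = MemPoolT_malloc(&entities);
//     e->id = 7;
//
//     // visit every slot that is currently in use
//     for(size_t i = 0; i <= MemPoolT_maxIndex(&entities); i++) {
//         if(!MemPoolT_isSlotUsed(&entities, i)) continue;
//         Entity* it = MemPoolT_getIndex(&entities, i);
//         // ... use it ...
//     }
//
//     MemPoolT_free(&entities, e); // double frees are tolerated here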
// VECMP is a set of typesafe vector macros built around a tracked mempool.
// Because it is backed by a mempool, pointers to items are stable forever.
#define VECMP(t) \
	struct { \
		t* lastInsert; \
		MemPoolT pool; \
	}

// initialize a mempool vector
#define VECMP_INIT(x, maxItems) \
do { \
	(x)->lastInsert = NULL; \
	MemPoolT_init(&(x)->pool, sizeof(*((x)->lastInsert)), maxItems); \
} while(0)

#define VECMP_INSERT(x, e) \
do { \
	(x)->lastInsert = MemPoolT_malloc(&(x)->pool); \
	*((x)->lastInsert) = (e); \
} while(0)

#define VECMP_INC(x) \
do { \
	(x)->lastInsert = MemPoolT_malloc(&(x)->pool); \
} while(0)

#define VECMP_LAST_INSERT(x) ((x)->lastInsert)

#define VECMP_DELETE(x, ptr) \
do { \
	MemPoolT_free(&(x)->pool, ptr); \
} while(0)

#define VECMP_LEN(x) ((x)->pool.fill)
#define VECMP_OWNS_PTR(x, ptr) (MemPoolT_ownsPointer(&(x)->pool, ptr))
#define VECMP_DATA(x) ( (typeof((x)->lastInsert)) ((x)->pool.pool) )
#define VECMP_ITEM(x, index) (VECMP_DATA(x)[index])
#define VECMP_INDEXOF(x, ptr) (MemPoolT_indexOf(&(x)->pool, ptr))
#define VECMP_LAST_INS_INDEX(x) (MemPoolT_indexOf(&(x)->pool, (x)->lastInsert))
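
// Example usage (a sketch; the `Sprite` type and the `sprites` variable are
// illustrative):
//
//     typedef struct Sprite { float x, y; } Sprite;
//
//     VECMP(Sprite) sprites;
//     VECMP_INIT(&sprites, 1024);
//
//     Sprite s = { 1.0f, 2.0f };
//     VECMP_INSERT(&sprites, s);
//
//     Sprite* p = VECMP_LAST_INSERT(&sprites);    // pointer stays valid forever
//     size_t  i = VECMP_LAST_INS_INDEX(&sprites); // stable index of that item
//
//     VECMP_DELETE(&sprites, p);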
#define VECMP__PASTEINNER(a, b) a ## b
#define VECMP__PASTE(a, b) VECMP__PASTEINNER(a, b)
#define VECMP__ITER(key, val) VECMP__PASTE(VEC_iter_ ## key ## __ ## val ## __, __LINE__)
#define VECMP__FINISHED(key, val) VECMP__PASTE(VECMP_finished__ ## key ## __ ## val ## __, __LINE__)
#define VECMP__MAINLOOP(key, val) VECMP__PASTE(VECMP_main_loop__ ## key ## __ ## val ## __, __LINE__)

#define VECMP_EACH(obj, index, valname) \
if(0) \
	VECMP__FINISHED(index, val): ; \
else \
	for(typeof((obj)->lastInsert) valname ;;) \
		for(size_t index = 0;;) \
			if(index < MemPoolT_maxIndex(&(obj)->pool) && (valname = MemPoolT_getNextUsedIndex(&(obj)->pool, &index), 1)) \
				goto VECMP__MAINLOOP(index, val); \
			else \
				while(1) \
					if(1) { \
						goto VECMP__FINISHED(index, val); \
					} \
					else \
						while(1) \
							if(++index >= MemPoolT_maxIndex(&(obj)->pool) || (valname = MemPoolT_getNextUsedIndex(&(obj)->pool, &index), 0)) { \
								goto VECMP__FINISHED(index, val); \
							} \
							else \
								VECMP__MAINLOOP(index, val) :
// { user block; not in macro }
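
// Example usage (a sketch, continuing the illustrative `sprites` vector from above;
// the braced user block runs once per live item):
//
//     VECMP_EACH(&sprites, idx, sp) {
//         // `idx` is the slot index, `sp` points at the Sprite in that slot
//         sp->x += 1.0f;
//     }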

#endif //__EACSMB_mempool_h__