
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };
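
/*
 * Per-cpu allocation state. The lockless fast paths update freelist and
 * tid together (this_cpu_cmpxchg_double, tracked by the CMPXCHG_DOUBLE_*
 * statistics above); tid advances on every operation, so a fast path that
 * raced with a change of the cpu slab sees a stale tid and falls back to
 * the slow path.
 */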
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL
#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif /* CONFIG_SLUB_CPU_PARTIAL */
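
/*
 * The per-cpu partial list is chained through page->next, which is why
 * slub_set_percpu_partial() advances the head to (p)->next when a slab
 * is taken off the list. The !CONFIG_SLUB_CPU_PARTIAL stubs let callers
 * compile unconditionally: a check such as
 * "c->page || slub_percpu_partial(c)" needs no ifdef, since the partial
 * term constant-folds to NULL when per-cpu partial slabs are disabled.
 */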

/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
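
/*
 * The encoding of x is private to mm/slub.c: the page order is kept in
 * the bits above OO_SHIFT and the object count in the bits below it, so
 * both values can be read or replaced with a single word-sized access.
 */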

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including meta data */
	unsigned int object_size;/* The size of an object without meta data */
	unsigned int offset;	/* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
	struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	/* For propagation, maximum size of a stored attr */
	unsigned int max_attr_size;
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)	\
({					\
	slub_cpu_partial(s) = (n);	\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
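
/*
 * As with the percpu helpers above, the stubs keep callers unconditional:
 * with per-cpu partials disabled, slub_cpu_partial(s) folds to 0 and the
 * setter expands to nothing.
 */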

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

/* Print diagnostics for an invalid or corrupted object */
void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

/* Advance p past the left redzone, if the cache has one */
void *fixup_red_left(struct kmem_cache *s, void *p);

/*
 * Given a pointer anywhere inside a slab page, return the start of the
 * object containing it: round x down to the object grid, clamp to the
 * last object on the page, then step past the left redzone if one is
 * present.
 */
static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x)
{
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

#endif /* _LINUX_SLUB_DEF_H */