/* mm/percpu-internal.h */
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _MM_PERCPU_INTERNAL_H
  3. #define _MM_PERCPU_INTERNAL_H
  4. #include <linux/types.h>
  5. #include <linux/percpu.h>
  6. /*
  7. * pcpu_block_md is the metadata block struct.
  8. * Each chunk's bitmap is split into a number of full blocks.
  9. * All units are in terms of bits.
  10. */
struct pcpu_block_md {
	int	contig_hint;		/* contig hint for block */
	int	contig_hint_start;	/* block relative starting
					   position of the contig hint */
	int	left_free;		/* size of free space along
					   the left side of the block */
	int	right_free;		/* size of free space along
					   the right side of the block */
	int	first_free;		/* block position of first free */
};
struct pcpu_chunk {
#ifdef CONFIG_PERCPU_STATS
	int			nr_alloc;	/* # of allocations */
	size_t			max_alloc_size; /* largest allocation size */
#endif

	struct list_head	list;		/* linked to pcpu_slot lists */
	int			free_bytes;	/* free bytes in the chunk */
	int			contig_bits;	/* max contiguous size hint */
	int			contig_bits_start; /* contig_bits starting
						      offset */
	void			*base_addr;	/* base address of this chunk */

	unsigned long		*alloc_map;	/* allocation map */
	unsigned long		*bound_map;	/* boundary map */
	struct pcpu_block_md	*md_blocks;	/* metadata blocks */

	void			*data;		/* chunk data */
	int			first_bit;	/* no free below this */
	bool			immutable;	/* no [de]population allowed */
	int			start_offset;	/* the overlap with the previous
						   region to have a page aligned
						   base_addr */
	int			end_offset;	/* additional area required to
						   have the region end page
						   aligned */

	int			nr_pages;	/* # of pages served by this chunk */
	int			nr_populated;	/* # of populated pages */
	int			nr_empty_pop_pages; /* # of empty populated pages */

	unsigned long		populated[];	/* populated bitmap */
};
/* all definitions presumably live in mm/percpu.c — TODO confirm */
extern spinlock_t pcpu_lock;	/* serializes the stat/chunk updates below
				   (see lockdep_assert_held in stat helpers) */

extern struct list_head *pcpu_slot;		/* chunk slot lists (chunk->list) */
extern int pcpu_nr_slots;			/* # of entries in pcpu_slot */
extern int pcpu_nr_empty_pop_pages;		/* total empty populated pages */

extern struct pcpu_chunk *pcpu_first_chunk;	/* chunk serving static percpu area */
extern struct pcpu_chunk *pcpu_reserved_chunk;	/* chunk for reserved allocations */
/**
 * pcpu_chunk_nr_blocks - converts nr_pages to # of md_blocks
 * @chunk: chunk of interest
 *
 * This conversion is from the number of physical pages that the chunk
 * serves to the number of bitmap blocks used.
 */
static inline int pcpu_chunk_nr_blocks(struct pcpu_chunk *chunk)
{
	/* evaluation order matters: bytes first, then divide by block size */
	return chunk->nr_pages * PAGE_SIZE / PCPU_BITMAP_BLOCK_SIZE;
}
/**
 * pcpu_nr_pages_to_map_bits - converts the pages to size of bitmap
 * @pages: number of physical pages
 *
 * This conversion is from physical pages to the number of bits
 * required in the bitmap: one bit per PCPU_MIN_ALLOC_SIZE bytes.
 */
static inline int pcpu_nr_pages_to_map_bits(int pages)
{
	return pages * PAGE_SIZE / PCPU_MIN_ALLOC_SIZE;
}
  77. /**
  78. * pcpu_chunk_map_bits - helper to convert nr_pages to size of bitmap
  79. * @chunk: chunk of interest
  80. *
  81. * This conversion is from the number of physical pages that the chunk
  82. * serves to the number of bits in the bitmap.
  83. */
  84. static inline int pcpu_chunk_map_bits(struct pcpu_chunk *chunk)
  85. {
  86. return pcpu_nr_pages_to_map_bits(chunk->nr_pages);
  87. }
#ifdef CONFIG_PERCPU_STATS

#include <linux/spinlock.h>

struct percpu_stats {
	u64 nr_alloc;		/* lifetime # of allocations */
	u64 nr_dealloc;		/* lifetime # of deallocations */
	u64 nr_cur_alloc;	/* current # of allocations */
	u64 nr_max_alloc;	/* max # of live allocations */
	u32 nr_chunks;		/* current # of live chunks */
	u32 nr_max_chunks;	/* max # of live chunks */
	size_t min_alloc_size;	/* min allocation size */
	size_t max_alloc_size;	/* max allocation size */
};
extern struct percpu_stats pcpu_stats;		/* global stats instance */
extern struct pcpu_alloc_info pcpu_stats_ai;	/* saved copy of the alloc_info
						   (see pcpu_stats_save_ai) */
  102. /*
  103. * For debug purposes. We don't care about the flexible array.
  104. */
  105. static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
  106. {
  107. memcpy(&pcpu_stats_ai, ai, sizeof(struct pcpu_alloc_info));
  108. /* initialize min_alloc_size to unit_size */
  109. pcpu_stats.min_alloc_size = pcpu_stats_ai.unit_size;
  110. }
  111. /*
  112. * pcpu_stats_area_alloc - increment area allocation stats
  113. * @chunk: the location of the area being allocated
  114. * @size: size of area to allocate in bytes
  115. *
  116. * CONTEXT:
  117. * pcpu_lock.
  118. */
  119. static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
  120. {
  121. lockdep_assert_held(&pcpu_lock);
  122. pcpu_stats.nr_alloc++;
  123. pcpu_stats.nr_cur_alloc++;
  124. pcpu_stats.nr_max_alloc =
  125. max(pcpu_stats.nr_max_alloc, pcpu_stats.nr_cur_alloc);
  126. pcpu_stats.min_alloc_size =
  127. min(pcpu_stats.min_alloc_size, size);
  128. pcpu_stats.max_alloc_size =
  129. max(pcpu_stats.max_alloc_size, size);
  130. chunk->nr_alloc++;
  131. chunk->max_alloc_size = max(chunk->max_alloc_size, size);
  132. }
  133. /*
  134. * pcpu_stats_area_dealloc - decrement allocation stats
  135. * @chunk: the location of the area being deallocated
  136. *
  137. * CONTEXT:
  138. * pcpu_lock.
  139. */
  140. static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
  141. {
  142. lockdep_assert_held(&pcpu_lock);
  143. pcpu_stats.nr_dealloc++;
  144. pcpu_stats.nr_cur_alloc--;
  145. chunk->nr_alloc--;
  146. }
  147. /*
  148. * pcpu_stats_chunk_alloc - increment chunk stats
  149. */
  150. static inline void pcpu_stats_chunk_alloc(void)
  151. {
  152. unsigned long flags;
  153. spin_lock_irqsave(&pcpu_lock, flags);
  154. pcpu_stats.nr_chunks++;
  155. pcpu_stats.nr_max_chunks =
  156. max(pcpu_stats.nr_max_chunks, pcpu_stats.nr_chunks);
  157. spin_unlock_irqrestore(&pcpu_lock, flags);
  158. }
  159. /*
  160. * pcpu_stats_chunk_dealloc - decrement chunk stats
  161. */
  162. static inline void pcpu_stats_chunk_dealloc(void)
  163. {
  164. unsigned long flags;
  165. spin_lock_irqsave(&pcpu_lock, flags);
  166. pcpu_stats.nr_chunks--;
  167. spin_unlock_irqrestore(&pcpu_lock, flags);
  168. }
#else

/*
 * !CONFIG_PERCPU_STATS: all stats hooks compile down to empty inline
 * stubs so callers need no #ifdefs of their own.
 */

static inline void pcpu_stats_save_ai(const struct pcpu_alloc_info *ai)
{
}

static inline void pcpu_stats_area_alloc(struct pcpu_chunk *chunk, size_t size)
{
}

static inline void pcpu_stats_area_dealloc(struct pcpu_chunk *chunk)
{
}

static inline void pcpu_stats_chunk_alloc(void)
{
}

static inline void pcpu_stats_chunk_dealloc(void)
{
}

#endif /* !CONFIG_PERCPU_STATS */
  186. #endif