/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_CPUSET_H
#define _LINUX_CPUSET_H
/*
 *  cpuset interface
 *
 *  Copyright (C) 2003 BULL SA
 *  Copyright (C) 2004-2006 Silicon Graphics, Inc.
 *
 */

#include <linux/sched.h>
#include <linux/sched/topology.h>
#include <linux/sched/task.h>
#include <linux/cpumask.h>
#include <linux/nodemask.h>
#include <linux/mm.h>
#include <linux/jump_label.h>

#ifdef CONFIG_CPUSETS
/*
 * Static branch rewrites can happen in an arbitrary order for a given
 * key. In code paths where we need to loop with read_mems_allowed_begin() and
 * read_mems_allowed_retry() to get a consistent view of mems_allowed, we need
 * to ensure that begin() always gets rewritten before retry() in the
 * disabled -> enabled transition. If not, then if local irqs are disabled
 * around the loop, we can deadlock since retry() would always be
 * comparing the latest value of the mems_allowed seqcount against 0 as
 * begin() still would see cpusets_enabled() as false. The enabled -> disabled
 * transition should happen in reverse order for the same reasons (want to stop
 * looking at real value of mems_allowed.sequence in retry() first).
 */

extern struct static_key_false cpusets_pre_enable_key;
extern struct static_key_false cpusets_enabled_key;

static inline bool cpusets_enabled(void)
{
	return static_branch_unlikely(&cpusets_enabled_key);
}

static inline void cpuset_inc(void)
{
	static_branch_inc(&cpusets_pre_enable_key);
	static_branch_inc(&cpusets_enabled_key);
}

static inline void cpuset_dec(void)
{
	static_branch_dec(&cpusets_enabled_key);
	static_branch_dec(&cpusets_pre_enable_key);
}
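
/*
 * Illustrative sketch (not part of the kernel sources): why cpuset_inc()
 * bumps the pre-enable key first. With irqs disabled around a retry loop,
 * the branches may be patched between begin() and retry(); because
 * read_mems_allowed_begin() is keyed on cpusets_pre_enable_key, it starts
 * handing out the live seqcount no later than retry() starts comparing
 * against it, so a loop of this shape can never spin on a stale cookie of 0:
 *
 *	cookie = read_mems_allowed_begin();	// live cookie, never stale 0
 *	do_allocation();
 *	if (read_mems_allowed_retry(cookie))	// compares the same seqcount
 *		goto retry;
 */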
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
extern void cpuset_force_rebuild(void);
extern void cpuset_update_active_cpus(void);
extern void cpuset_wait_for_hotplug(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
#define cpuset_current_mems_allowed (current->mems_allowed)
void cpuset_init_current_mems_allowed(void);
int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask);

extern bool __cpuset_node_allowed(int node, gfp_t gfp_mask);

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_node_allowed(node, gfp_mask);
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return __cpuset_node_allowed(zone_to_nid(z), gfp_mask);
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	if (cpusets_enabled())
		return __cpuset_zone_allowed(z, gfp_mask);
	return true;
}
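
/*
 * Illustrative sketch (an assumption about a typical caller, not part of
 * this header): the page allocator's zonelist walk filters zones through
 * these helpers in roughly this shape, skipping any zone whose node is
 * not in the current task's mems_allowed:
 *
 *	for_each_zone_zonelist(zone, z, zonelist, highest_zoneidx) {
 *		if (cpusets_enabled() &&
 *		    !__cpuset_zone_allowed(zone, gfp_mask))
 *			continue;
 *		// ... try to allocate from this zone ...
 *	}
 *
 * Note the cpusets_enabled() guard: when no cpusets are in use, the
 * static branch compiles the check down to a no-op on the fast path.
 */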
extern int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
					  const struct task_struct *tsk2);

#define cpuset_memory_pressure_bump() 				\
	do {							\
		if (cpuset_memory_pressure_enabled)		\
			__cpuset_memory_pressure_bump();	\
	} while (0)
extern int cpuset_memory_pressure_enabled;
extern void __cpuset_memory_pressure_bump(void);

extern void cpuset_task_status_allowed(struct seq_file *m,
					struct task_struct *task);
extern int proc_cpuset_show(struct seq_file *m, struct pid_namespace *ns,
			    struct pid *pid, struct task_struct *tsk);

extern int cpuset_mem_spread_node(void);
extern int cpuset_slab_spread_node(void);

static inline int cpuset_do_page_mem_spread(void)
{
	return task_spread_page(current);
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return task_spread_slab(current);
}

extern bool current_cpuset_is_being_rebound(void);

extern void rebuild_sched_domains(void);

extern void cpuset_print_current_mems_allowed(void);
/*
 * read_mems_allowed_begin is required when making decisions involving
 * mems_allowed such as during page allocation. mems_allowed can be updated in
 * parallel and depending on the new value an operation can fail potentially
 * causing process failure. A retry loop with read_mems_allowed_begin and
 * read_mems_allowed_retry prevents these artificial failures.
 */
static inline unsigned int read_mems_allowed_begin(void)
{
	if (!static_branch_unlikely(&cpusets_pre_enable_key))
		return 0;

	return read_seqcount_begin(&current->mems_allowed_seq);
}

/*
 * If this returns true, the operation that took place after
 * read_mems_allowed_begin may have failed artificially due to a concurrent
 * update of mems_allowed. It is up to the caller to retry the operation if
 * appropriate.
 */
static inline bool read_mems_allowed_retry(unsigned int seq)
{
	if (!static_branch_unlikely(&cpusets_enabled_key))
		return false;

	return read_seqcount_retry(&current->mems_allowed_seq, seq);
}
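
/*
 * Illustrative sketch (an assumption about a typical caller, not part of
 * this header): the canonical retry loop pairs begin() and retry() around
 * the operation, retrying only when the operation failed *and* mems_allowed
 * changed underneath it:
 *
 *	unsigned int cpuset_mems_cookie;
 *	struct page *page;
 *
 *	do {
 *		cpuset_mems_cookie = read_mems_allowed_begin();
 *		page = attempt_allocation(gfp_mask, order);
 *	} while (!page && read_mems_allowed_retry(cpuset_mems_cookie));
 *
 * attempt_allocation() is a placeholder for whatever operation consults
 * cpuset_current_mems_allowed.
 */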
static inline void set_mems_allowed(nodemask_t nodemask)
{
	unsigned long flags;

	task_lock(current);
	local_irq_save(flags);
	write_seqcount_begin(&current->mems_allowed_seq);
	current->mems_allowed = nodemask;
	write_seqcount_end(&current->mems_allowed_seq);
	local_irq_restore(flags);
	task_unlock(current);
}
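
/*
 * Illustrative note (not part of this header): this is the write side of
 * the seqcount paired with read_mems_allowed_begin()/retry() above; irqs
 * are disabled so an interrupt handler reading mems_allowed on this CPU
 * cannot spin forever against a half-finished write. A typical caller
 * simply installs a fresh mask, e.g.:
 *
 *	set_mems_allowed(node_states[N_MEMORY]);
 */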
#else /* !CONFIG_CPUSETS */

static inline bool cpusets_enabled(void) { return false; }

static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}

static inline void cpuset_force_rebuild(void) { }

static inline void cpuset_update_active_cpus(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_wait_for_hotplug(void) { }

static inline void cpuset_cpus_allowed(struct task_struct *p,
				       struct cpumask *mask)
{
	cpumask_copy(mask, cpu_possible_mask);
}

static inline void cpuset_cpus_allowed_fallback(struct task_struct *p)
{
}

static inline nodemask_t cpuset_mems_allowed(struct task_struct *p)
{
	return node_possible_map;
}

#define cpuset_current_mems_allowed (node_states[N_MEMORY])
static inline void cpuset_init_current_mems_allowed(void) {}

static inline int cpuset_nodemask_valid_mems_allowed(nodemask_t *nodemask)
{
	return 1;
}

static inline bool cpuset_node_allowed(int node, gfp_t gfp_mask)
{
	return true;
}

static inline bool __cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline bool cpuset_zone_allowed(struct zone *z, gfp_t gfp_mask)
{
	return true;
}

static inline int cpuset_mems_allowed_intersects(const struct task_struct *tsk1,
						 const struct task_struct *tsk2)
{
	return 1;
}

static inline void cpuset_memory_pressure_bump(void) {}

static inline void cpuset_task_status_allowed(struct seq_file *m,
						struct task_struct *task)
{
}

static inline int cpuset_mem_spread_node(void)
{
	return 0;
}

static inline int cpuset_slab_spread_node(void)
{
	return 0;
}

static inline int cpuset_do_page_mem_spread(void)
{
	return 0;
}

static inline int cpuset_do_slab_mem_spread(void)
{
	return 0;
}

static inline bool current_cpuset_is_being_rebound(void)
{
	return false;
}

static inline void rebuild_sched_domains(void)
{
	partition_sched_domains(1, NULL, NULL);
}

static inline void cpuset_print_current_mems_allowed(void)
{
}

static inline void set_mems_allowed(nodemask_t nodemask)
{
}

static inline unsigned int read_mems_allowed_begin(void)
{
	return 0;
}

static inline bool read_mems_allowed_retry(unsigned int seq)
{
	return false;
}

#endif /* !CONFIG_CPUSETS */

#endif /* _LINUX_CPUSET_H */