  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_KTHREAD_H
  3. #define _LINUX_KTHREAD_H
  4. /* Simple interface for creating and stopping kernel threads without mess. */
  5. #include <linux/err.h>
  6. #include <linux/sched.h>
  7. #include <linux/cgroup.h>
/*
 * Create a kthread on @node, left in the stopped state (see the
 * kthread_create() documentation below).  @namefmt and the trailing
 * varargs build the thread name; __printf(4, 5) lets the compiler
 * type-check them against the format string.
 */
__printf(4, 5)
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data,
					   int node,
					   const char namefmt[], ...);
  13. /**
  14. * kthread_create - create a kthread on the current node
  15. * @threadfn: the function to run in the thread
  16. * @data: data pointer for @threadfn()
  17. * @namefmt: printf-style format string for the thread name
  18. * @arg...: arguments for @namefmt.
  19. *
  20. * This macro will create a kthread on the current node, leaving it in
  21. * the stopped state. This is just a helper for kthread_create_on_node();
  22. * see the documentation there for more details.
  23. */
  24. #define kthread_create(threadfn, data, namefmt, arg...) \
  25. kthread_create_on_node(threadfn, data, NUMA_NO_NODE, namefmt, ##arg)
/*
 * Variant of kthread_create() that takes a target CPU.
 * NOTE(review): presumably the new thread is created on and bound to
 * @cpu, and @namefmt receives the CPU number — confirm in kthread.c.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data,
					  unsigned int cpu,
					  const char *namefmt);
  30. /**
  31. * kthread_run - create and wake a thread.
  32. * @threadfn: the function to run until signal_pending(current).
  33. * @data: data ptr for @threadfn.
  34. * @namefmt: printf-style name for the thread.
  35. *
  36. * Description: Convenient wrapper for kthread_create() followed by
  37. * wake_up_process(). Returns the kthread or ERR_PTR(-ENOMEM).
  38. */
  39. #define kthread_run(threadfn, data, namefmt, ...) \
  40. ({ \
  41. struct task_struct *__k \
  42. = kthread_create(threadfn, data, namefmt, ## __VA_ARGS__); \
  43. if (!IS_ERR(__k)) \
  44. wake_up_process(__k); \
  45. __k; \
  46. })
/* Release the task_struct of a kthread (see kthread.c for when). */
void free_kthread_struct(struct task_struct *k);

/* Bind a kthread to one CPU / to a cpumask.
 * NOTE(review): presumably must be called before the thread first
 * runs — confirm against kthread.c. */
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);

/* Stop protocol: the controller calls kthread_stop(); the thread
 * polls kthread_should_stop()/kthread_should_park() in its loop. */
int kthread_stop(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
bool kthread_freezable_should_stop(bool *was_frozen);

/* Retrieve the @data pointer the kthread was created with. */
void *kthread_data(struct task_struct *k);
/* NOTE(review): "probe" variant — presumably a non-faulting read of
 * the same data; confirm in kthread.c. */
void *kthread_probe_data(struct task_struct *k);

/* Park protocol: controller parks/unparks; thread calls kthread_parkme(). */
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);

/* kthreadd daemon entry point and its task (defined in kthread.c). */
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
extern int tsk_fork_get_node(struct task_struct *tsk);
/*
 * Simple work processor based on kthread.
 *
 * This provides an easier way to make use of kthreads.  A kthread_work
 * can be queued and flushed using queue/kthread_flush_work()
 * respectively.  Queued kthread_works are processed by a kthread
 * running kthread_worker_fn().
 */
struct kthread_work;

/* Callback invoked for each processed work item. */
typedef void (*kthread_work_func_t)(struct kthread_work *work);

/* Timer callback that fires when a delayed work's delay expires. */
void kthread_delayed_work_timer_fn(struct timer_list *t);

/* Flags for kthread_create_worker*() / struct kthread_worker::flags. */
enum {
	KTW_FREEZABLE		= 1 << 0,	/* freeze during suspend */
};
/* A dedicated kthread plus the queue of work it processes. */
struct kthread_worker {
	unsigned int		flags;		/* KTW_* flags (e.g. KTW_FREEZABLE) */
	spinlock_t		lock;		/* NOTE(review): presumably guards the
						 * two lists below — confirm in kthread.c */
	struct list_head	work_list;
	struct list_head	delayed_work_list;
	struct task_struct	*task;		/* the worker kthread itself */
	struct kthread_work	*current_work;	/* work item being executed, if any */
};
/* One unit of work; embed in a larger struct and container_of() back. */
struct kthread_work {
	struct list_head	node;		/* link on a worker's work_list */
	kthread_work_func_t	func;		/* callback to run */
	struct kthread_worker	*worker;	/* worker this work is/was queued on */
	/* Number of canceling calls that are running at the moment. */
	int			canceling;
};
/* A kthread_work whose queueing is deferred by a timer. */
struct kthread_delayed_work {
	struct kthread_work	work;
	struct timer_list	timer;	/* fires kthread_delayed_work_timer_fn() */
};
/* Static initializer for a struct kthread_worker (lists empty, lock unlocked). */
#define KTHREAD_WORKER_INIT(worker)	{				\
	.lock = __SPIN_LOCK_UNLOCKED((worker).lock),			\
	.work_list = LIST_HEAD_INIT((worker).work_list),		\
	.delayed_work_list = LIST_HEAD_INIT((worker).delayed_work_list),\
	}

/* Static initializer for a struct kthread_work bound to @fn. */
#define KTHREAD_WORK_INIT(work, fn)	{				\
	.node = LIST_HEAD_INIT((work).node),				\
	.func = (fn),							\
	}

/* Static initializer for a delayed work: inner work plus its timer.
 * TIMER_IRQSAFE so the timer callback can queue with the worker lock. */
#define KTHREAD_DELAYED_WORK_INIT(dwork, fn) {				\
	.work = KTHREAD_WORK_INIT((dwork).work, (fn)),			\
	.timer = __TIMER_INITIALIZER(kthread_delayed_work_timer_fn,	\
				     TIMER_IRQSAFE),			\
	}
/* Define-and-initialize helpers wrapping the KTHREAD_*_INIT macros. */
#define DEFINE_KTHREAD_WORKER(worker)					\
	struct kthread_worker worker = KTHREAD_WORKER_INIT(worker)

#define DEFINE_KTHREAD_WORK(work, fn)					\
	struct kthread_work work = KTHREAD_WORK_INIT(work, fn)

#define DEFINE_KTHREAD_DELAYED_WORK(dwork, fn)				\
	struct kthread_delayed_work dwork =				\
		KTHREAD_DELAYED_WORK_INIT(dwork, fn)
/*
 * kthread_worker.lock needs its own lockdep class key when defined on
 * stack with lockdep enabled.  Use the following macros in such cases.
 */
#ifdef CONFIG_LOCKDEP
/* GNU statement expression: runs kthread_init_worker() (which creates
 * a per-site lock class key), then yields the worker as the value. */
# define KTHREAD_WORKER_INIT_ONSTACK(worker)				\
	({ kthread_init_worker(&worker); worker; })
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker)				\
	struct kthread_worker worker = KTHREAD_WORKER_INIT_ONSTACK(worker)
#else
/* Without lockdep there is no key to allocate; plain init suffices. */
# define DEFINE_KTHREAD_WORKER_ONSTACK(worker) DEFINE_KTHREAD_WORKER(worker)
#endif
/* Runtime worker init; @name and @key feed lockdep. */
extern void __kthread_init_worker(struct kthread_worker *worker,
				  const char *name, struct lock_class_key *key);

/* Initialize @worker at runtime.  The static __key gives each macro
 * expansion site its own lockdep class for the worker's lock, and the
 * stringified "(worker)->lock" becomes the lock's lockdep name. */
#define kthread_init_worker(worker)					\
	do {								\
		static struct lock_class_key __key;			\
		__kthread_init_worker((worker), "("#worker")->lock", &__key); \
	} while (0)
/* Initialize @work at runtime: zero everything (worker pointer and
 * canceling count included), then set up the list node and callback. */
#define kthread_init_work(work, fn)					\
	do {								\
		memset((work), 0, sizeof(struct kthread_work));		\
		INIT_LIST_HEAD(&(work)->node);				\
		(work)->func = (fn);					\
	} while (0)
/* Initialize @dwork at runtime: the embedded work plus its timer.
 * TIMER_IRQSAFE matches the static KTHREAD_DELAYED_WORK_INIT above. */
#define kthread_init_delayed_work(dwork, fn)				\
	do {								\
		kthread_init_work(&(dwork)->work, (fn));		\
		__init_timer(&(dwork)->timer,				\
			     kthread_delayed_work_timer_fn,		\
			     TIMER_IRQSAFE);				\
	} while (0)
/* Main loop run by a worker's kthread; @worker_ptr is the kthread_worker. */
int kthread_worker_fn(void *worker_ptr);

/* Create a worker plus its kthread; @flags takes KTW_* bits,
 * @namefmt names the thread.  __printf() enables format checking. */
__printf(2, 3)
struct kthread_worker *
kthread_create_worker(unsigned int flags, const char namefmt[], ...);

/* Same, but the worker's kthread is tied to @cpu. */
__printf(3, 4) struct kthread_worker *
kthread_create_worker_on_cpu(int cpu, unsigned int flags,
			     const char namefmt[], ...);

/* Queue work / delayed work on @worker.
 * NOTE(review): the bool return presumably signals whether the work
 * was newly queued — confirm against kthread.c. */
bool kthread_queue_work(struct kthread_worker *worker,
			struct kthread_work *work);

bool kthread_queue_delayed_work(struct kthread_worker *worker,
				struct kthread_delayed_work *dwork,
				unsigned long delay);

bool kthread_mod_delayed_work(struct kthread_worker *worker,
			      struct kthread_delayed_work *dwork,
			      unsigned long delay);

/* Wait until one work item / all of a worker's work has completed. */
void kthread_flush_work(struct kthread_work *work);
void kthread_flush_worker(struct kthread_worker *worker);

/* Cancel work; "sync" = wait for a running instance to finish first. */
bool kthread_cancel_work_sync(struct kthread_work *work);
bool kthread_cancel_delayed_work_sync(struct kthread_delayed_work *work);

/* Tear down a worker created by kthread_create_worker*(). */
void kthread_destroy_worker(struct kthread_worker *worker);
/* Associate the current kthread with a block cgroup (CONFIG_BLK_CGROUP). */
#ifdef CONFIG_BLK_CGROUP
void kthread_associate_blkcg(struct cgroup_subsys_state *css);
struct cgroup_subsys_state *kthread_blkcg(void);
#else
/* !CONFIG_BLK_CGROUP: no-op stubs so callers need no #ifdefs. */
static inline void kthread_associate_blkcg(struct cgroup_subsys_state *css) { }
static inline struct cgroup_subsys_state *kthread_blkcg(void)
{
	/* No blkcg support configured: there is never an association. */
	return NULL;
}
#endif
#endif /* _LINUX_KTHREAD_H */