kthread.c

/* Kernel thread helper functions.
 * Copyright (C) 2004 IBM Corporation, Rusty Russell.
 *
 * Creation is done via kthreadd, so that we get a clean environment
 * even if we're invoked from userspace (think modprobe, hotplug cpu,
 * etc.).
 */
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/cpuset.h>
#include <linux/unistd.h>
#include <linux/file.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/freezer.h>
#include <linux/ptrace.h>
#include <linux/uaccess.h>
#include <trace/events/sched.h>

static DEFINE_SPINLOCK(kthread_create_lock);
static LIST_HEAD(kthread_create_list);
struct task_struct *kthreadd_task;

struct kthread_create_info
{
	/* Information passed to kthread() from kthreadd. */
	int (*threadfn)(void *data);
	void *data;
	int node;

	/* Result passed back to kthread_create() from kthreadd. */
	struct task_struct *result;
	struct completion *done;

	struct list_head list;
};

struct kthread {
	unsigned long flags;
	unsigned int cpu;
	void *data;
	struct completion parked;
	struct completion exited;
};

enum KTHREAD_BITS {
	KTHREAD_IS_PER_CPU = 0,
	KTHREAD_SHOULD_STOP,
	KTHREAD_SHOULD_PARK,
	KTHREAD_IS_PARKED,
};

#define __to_kthread(vfork)	\
	container_of(vfork, struct kthread, exited)

static inline struct kthread *to_kthread(struct task_struct *k)
{
	return __to_kthread(k->vfork_done);
}

static struct kthread *to_live_kthread(struct task_struct *k)
{
	struct completion *vfork = ACCESS_ONCE(k->vfork_done);
	if (likely(vfork))
		return __to_kthread(vfork);
	return NULL;
}

/**
 * kthread_should_stop - should this kthread return now?
 *
 * When someone calls kthread_stop() on your kthread, it will be woken
 * and this will return true.  You should then return, and your return
 * value will be passed through to kthread_stop().
 */
bool kthread_should_stop(void)
{
	return test_bit(KTHREAD_SHOULD_STOP, &to_kthread(current)->flags);
}
EXPORT_SYMBOL(kthread_should_stop);

/**
 * kthread_should_park - should this kthread park now?
 *
 * When someone calls kthread_park() on your kthread, it will be woken
 * and this will return true.  You should then do the necessary
 * cleanup and call kthread_parkme()
 *
 * Similar to kthread_should_stop(), but this keeps the thread alive
 * and in a park position.  kthread_unpark() "restarts" the thread and
 * calls the thread function again.
 */
bool kthread_should_park(void)
{
	return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
}

/**
 * kthread_freezable_should_stop - should this freezable kthread return now?
 * @was_frozen: optional out parameter, indicates whether %current was frozen
 *
 * kthread_should_stop() for freezable kthreads, which will enter
 * refrigerator if necessary.  This function is safe from kthread_stop() /
 * freezer deadlock and freezable kthreads should use this function instead
 * of calling try_to_freeze() directly.
 */
bool kthread_freezable_should_stop(bool *was_frozen)
{
	bool frozen = false;

	might_sleep();

	if (unlikely(freezing(current)))
		frozen = __refrigerator(true);

	if (was_frozen)
		*was_frozen = frozen;

	return kthread_should_stop();
}
EXPORT_SYMBOL_GPL(kthread_freezable_should_stop);
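
/*
 * Illustrative usage sketch (not part of this file): per the kernel-doc
 * above, a freezable kthread marks itself freezable and then polls
 * kthread_freezable_should_stop() in its main loop, so it both enters the
 * refrigerator when the system freezes and still honours kthread_stop().
 * The function name below is hypothetical.
 *
 *	static int my_freezable_threadfn(void *data)
 *	{
 *		set_freezable();
 *		while (!kthread_freezable_should_stop(NULL))
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 */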

/**
 * kthread_data - return data value specified on kthread creation
 * @task: kthread task in question
 *
 * Return the data value specified when kthread @task was created.
 * The caller is responsible for ensuring the validity of @task when
 * calling this function.
 */
void *kthread_data(struct task_struct *task)
{
	return to_kthread(task)->data;
}

/**
 * probe_kthread_data - speculative version of kthread_data()
 * @task: possible kthread task in question
 *
 * @task could be a kthread task.  Return the data value specified when it
 * was created if accessible.  If @task isn't a kthread task or its data is
 * inaccessible for any reason, %NULL is returned.  This function requires
 * that @task itself is safe to dereference.
 */
void *probe_kthread_data(struct task_struct *task)
{
	struct kthread *kthread = to_kthread(task);
	void *data = NULL;

	probe_kernel_read(&data, &kthread->data, sizeof(data));
	return data;
}

static void __kthread_parkme(struct kthread *self)
{
	__set_current_state(TASK_PARKED);
	while (test_bit(KTHREAD_SHOULD_PARK, &self->flags)) {
		if (!test_and_set_bit(KTHREAD_IS_PARKED, &self->flags))
			complete(&self->parked);
		schedule();
		__set_current_state(TASK_PARKED);
	}
	clear_bit(KTHREAD_IS_PARKED, &self->flags);
	__set_current_state(TASK_RUNNING);
}

void kthread_parkme(void)
{
	__kthread_parkme(to_kthread(current));
}

static int kthread(void *_create)
{
	/* Copy data: it's on kthread's stack */
	struct kthread_create_info *create = _create;
	int (*threadfn)(void *data) = create->threadfn;
	void *data = create->data;
	struct completion *done;
	struct kthread self;
	int ret;

	self.flags = 0;
	self.data = data;
	init_completion(&self.exited);
	init_completion(&self.parked);
	current->vfork_done = &self.exited;

	/* If user was SIGKILLed, I release the structure. */
	done = xchg(&create->done, NULL);
	if (!done) {
		kfree(create);
		do_exit(-EINTR);
	}
	/* OK, tell user we're spawned, wait for stop or wakeup */
	__set_current_state(TASK_UNINTERRUPTIBLE);
	create->result = current;
	complete(done);
	schedule();

	ret = -EINTR;
	if (!test_bit(KTHREAD_SHOULD_STOP, &self.flags)) {
		__kthread_parkme(&self);
		ret = threadfn(data);
	}
	/* we can't just return, we must preserve "self" on stack */
	do_exit(ret);
}

/* called from do_fork() to get node information for about to be created task */
int tsk_fork_get_node(struct task_struct *tsk)
{
#ifdef CONFIG_NUMA
	if (tsk == kthreadd_task)
		return tsk->pref_node_fork;
#endif
	return NUMA_NO_NODE;
}

static void create_kthread(struct kthread_create_info *create)
{
	int pid;

#ifdef CONFIG_NUMA
	current->pref_node_fork = create->node;
#endif
	/* We want our own signal handler (we take no signals by default). */
	pid = kernel_thread(kthread, create, CLONE_FS | CLONE_FILES | SIGCHLD);
	if (pid < 0) {
		/* If user was SIGKILLed, I release the structure. */
		struct completion *done = xchg(&create->done, NULL);

		if (!done) {
			kfree(create);
			return;
		}
		create->result = ERR_PTR(pid);
		complete(done);
	}
}

/**
 * kthread_create_on_node - create a kthread.
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @node: memory node number.
 * @namefmt: printf-style name for the thread.
 *
 * Description: This helper function creates and names a kernel
 * thread.  The thread will be stopped: use wake_up_process() to start
 * it.  See also kthread_run().
 *
 * If thread is going to be bound on a particular cpu, give its node
 * in @node, to get NUMA affinity for kthread stack, or else give -1.
 * When woken, the thread will run @threadfn() with @data as its
 * argument.  @threadfn() can either call do_exit() directly if it is a
 * standalone thread for which no one will call kthread_stop(), or
 * return when 'kthread_should_stop()' is true (which means
 * kthread_stop() has been called).  The return value should be zero
 * or a negative error number; it will be passed to kthread_stop().
 *
 * Returns a task_struct or ERR_PTR(-ENOMEM) or ERR_PTR(-EINTR).
 */
struct task_struct *kthread_create_on_node(int (*threadfn)(void *data),
					   void *data, int node,
					   const char namefmt[],
					   ...)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct task_struct *task;
	struct kthread_create_info *create = kmalloc(sizeof(*create),
						     GFP_KERNEL);

	if (!create)
		return ERR_PTR(-ENOMEM);
	create->threadfn = threadfn;
	create->data = data;
	create->node = node;
	create->done = &done;

	spin_lock(&kthread_create_lock);
	list_add_tail(&create->list, &kthread_create_list);
	spin_unlock(&kthread_create_lock);

	wake_up_process(kthreadd_task);
	/*
	 * Wait for completion in killable state, for I might be chosen by
	 * the OOM killer while kthreadd is trying to allocate memory for
	 * new kernel thread.
	 */
	if (unlikely(wait_for_completion_killable(&done))) {
		/*
		 * If I was SIGKILLed before kthreadd (or new kernel thread)
		 * calls complete(), leave the cleanup of this structure to
		 * that thread.
		 */
		if (xchg(&create->done, NULL))
			return ERR_PTR(-EINTR);
		/*
		 * kthreadd (or new kernel thread) will call complete()
		 * shortly.
		 */
		wait_for_completion(&done);
	}
	task = create->result;
	if (!IS_ERR(task)) {
		static const struct sched_param param = { .sched_priority = 0 };
		va_list args;

		va_start(args, namefmt);
		vsnprintf(task->comm, sizeof(task->comm), namefmt, args);
		va_end(args);
		/*
		 * root may have changed our (kthreadd's) priority or CPU mask.
		 * The kernel thread should not inherit these properties.
		 */
		sched_setscheduler_nocheck(task, SCHED_NORMAL, &param);
		set_cpus_allowed_ptr(task, cpu_all_mask);
	}
	kfree(create);
	return task;
}
EXPORT_SYMBOL(kthread_create_on_node);
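
/*
 * Illustrative usage sketch (not part of this file): the pattern described
 * in the kernel-doc above pairs kthread_run() (or kthread_create() followed
 * by wake_up_process()) with a kthread_should_stop() loop in the thread
 * function and a final kthread_stop() from the creator.  The identifiers
 * my_threadfn and my_task are hypothetical.
 *
 *	static int my_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop())
 *			schedule_timeout_interruptible(HZ);
 *		return 0;
 *	}
 *
 *	struct task_struct *my_task;
 *
 *	my_task = kthread_run(my_threadfn, NULL, "my_thread");
 *	if (IS_ERR(my_task))
 *		return PTR_ERR(my_task);
 *	...
 *	ret = kthread_stop(my_task);
 */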

static void __kthread_bind(struct task_struct *p, unsigned int cpu, long state)
{
	/* Must have done schedule() in kthread() before we set_task_cpu */
	if (!wait_task_inactive(p, state)) {
		WARN_ON(1);
		return;
	}
	/* It's safe because the task is inactive. */
	do_set_cpus_allowed(p, cpumask_of(cpu));
	p->flags |= PF_NO_SETAFFINITY;
}

/**
 * kthread_bind - bind a just-created kthread to a cpu.
 * @p: thread created by kthread_create().
 * @cpu: cpu (might not be online, must be possible) for @k to run on.
 *
 * Description: This function is equivalent to set_cpus_allowed(),
 * except that @cpu doesn't need to be online, and the thread must be
 * stopped (i.e., just returned from kthread_create()).
 */
void kthread_bind(struct task_struct *p, unsigned int cpu)
{
	__kthread_bind(p, cpu, TASK_UNINTERRUPTIBLE);
}
EXPORT_SYMBOL(kthread_bind);

/**
 * kthread_create_on_cpu - Create a cpu bound kthread
 * @threadfn: the function to run until signal_pending(current).
 * @data: data ptr for @threadfn.
 * @cpu: The cpu on which the thread should be bound.
 * @namefmt: printf-style name for the thread.  Format is restricted
 *	     to "name.*%u".  Code fills in cpu number.
 *
 * Description: This helper function creates and names a kernel thread.
 * The thread will be woken and put into park mode.
 */
struct task_struct *kthread_create_on_cpu(int (*threadfn)(void *data),
					  void *data, unsigned int cpu,
					  const char *namefmt)
{
	struct task_struct *p;

	p = kthread_create_on_node(threadfn, data, cpu_to_node(cpu), namefmt,
				   cpu);
	if (IS_ERR(p))
		return p;
	set_bit(KTHREAD_IS_PER_CPU, &to_kthread(p)->flags);
	to_kthread(p)->cpu = cpu;
	/* Park the thread to get it out of TASK_UNINTERRUPTIBLE state */
	kthread_park(p);
	return p;
}

static void __kthread_unpark(struct task_struct *k, struct kthread *kthread)
{
	clear_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
	/*
	 * We clear the IS_PARKED bit here as we don't wait
	 * until the task has left the park code. So if we'd
	 * park before that happens we'd see the IS_PARKED bit
	 * which might be about to be cleared.
	 */
	if (test_and_clear_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
		if (test_bit(KTHREAD_IS_PER_CPU, &kthread->flags))
			__kthread_bind(k, kthread->cpu, TASK_PARKED);
		wake_up_state(k, TASK_PARKED);
	}
}

/**
 * kthread_unpark - unpark a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return false, wakes it, and
 * waits for it to return.  If the thread is marked percpu then it's
 * bound to the cpu again.
 */
void kthread_unpark(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);

	if (kthread)
		__kthread_unpark(k, kthread);
}

/**
 * kthread_park - park a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_park() for @k to return true, wakes it, and
 * waits for it to return. This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will park without
 * calling threadfn().
 *
 * Returns 0 if the thread is parked, -ENOSYS if the thread exited.
 * If called by the kthread itself just the park bit is set.
 */
int kthread_park(struct task_struct *k)
{
	struct kthread *kthread = to_live_kthread(k);
	int ret = -ENOSYS;

	if (kthread) {
		if (!test_bit(KTHREAD_IS_PARKED, &kthread->flags)) {
			set_bit(KTHREAD_SHOULD_PARK, &kthread->flags);
			if (k != current) {
				wake_up_process(k);
				wait_for_completion(&kthread->parked);
			}
		}
		ret = 0;
	}
	return ret;
}
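
/*
 * Illustrative usage sketch (not part of this file): a thread that is meant
 * to be parked (for example one created with kthread_create_on_cpu())
 * checks kthread_should_park() in its loop and calls kthread_parkme(), so
 * that kthread_park()/kthread_unpark() from another context behave as the
 * kernel-doc above describes.  The function name is hypothetical.
 *
 *	static int my_percpu_threadfn(void *data)
 *	{
 *		while (!kthread_should_stop()) {
 *			if (kthread_should_park()) {
 *				kthread_parkme();
 *				continue;
 *			}
 *			schedule_timeout_interruptible(HZ);
 *		}
 *		return 0;
 *	}
 */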

/**
 * kthread_stop - stop a thread created by kthread_create().
 * @k: thread created by kthread_create().
 *
 * Sets kthread_should_stop() for @k to return true, wakes it, and
 * waits for it to exit.  This can also be called after kthread_create()
 * instead of calling wake_up_process(): the thread will exit without
 * calling threadfn().
 *
 * If threadfn() may call do_exit() itself, the caller must ensure
 * task_struct can't go away.
 *
 * Returns the result of threadfn(), or %-EINTR if wake_up_process()
 * was never called.
 */
int kthread_stop(struct task_struct *k)
{
	struct kthread *kthread;
	int ret;

	trace_sched_kthread_stop(k);

	get_task_struct(k);
	kthread = to_live_kthread(k);
	if (kthread) {
		set_bit(KTHREAD_SHOULD_STOP, &kthread->flags);
		__kthread_unpark(k, kthread);
		wake_up_process(k);
		wait_for_completion(&kthread->exited);
	}
	ret = k->exit_code;
	put_task_struct(k);

	trace_sched_kthread_stop_ret(ret);
	return ret;
}
EXPORT_SYMBOL(kthread_stop);

int kthreadd(void *unused)
{
	struct task_struct *tsk = current;

	/* Setup a clean context for our children to inherit. */
	set_task_comm(tsk, "kthreadd");
	ignore_signals(tsk);
	set_cpus_allowed_ptr(tsk, cpu_all_mask);
	set_mems_allowed(node_states[N_MEMORY]);

	current->flags |= PF_NOFREEZE;

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&kthread_create_list))
			schedule();
		__set_current_state(TASK_RUNNING);

		spin_lock(&kthread_create_lock);
		while (!list_empty(&kthread_create_list)) {
			struct kthread_create_info *create;

			create = list_entry(kthread_create_list.next,
					    struct kthread_create_info, list);
			list_del_init(&create->list);
			spin_unlock(&kthread_create_lock);

			create_kthread(create);

			spin_lock(&kthread_create_lock);
		}
		spin_unlock(&kthread_create_lock);
	}

	return 0;
}

void __init_kthread_worker(struct kthread_worker *worker,
			   const char *name,
			   struct lock_class_key *key)
{
	spin_lock_init(&worker->lock);
	lockdep_set_class_and_name(&worker->lock, key, name);
	INIT_LIST_HEAD(&worker->work_list);
	worker->task = NULL;
}
EXPORT_SYMBOL_GPL(__init_kthread_worker);

/**
 * kthread_worker_fn - kthread function to process kthread_worker
 * @worker_ptr: pointer to initialized kthread_worker
 *
 * This function can be used as @threadfn to kthread_create() or
 * kthread_run() with @worker_ptr argument pointing to an initialized
 * kthread_worker.  The started kthread will process work_list until
 * it is stopped with kthread_stop().  A kthread can also call
 * this function directly after extra initialization.
 *
 * Different kthreads can be used for the same kthread_worker as long
 * as there's only one kthread attached to it at any given time.  A
 * kthread_worker without an attached kthread simply collects queued
 * kthread_works.
 */
int kthread_worker_fn(void *worker_ptr)
{
	struct kthread_worker *worker = worker_ptr;
	struct kthread_work *work;

	WARN_ON(worker->task);
	worker->task = current;
repeat:
	set_current_state(TASK_INTERRUPTIBLE);	/* mb paired w/ kthread_stop */

	if (kthread_should_stop()) {
		__set_current_state(TASK_RUNNING);
		spin_lock_irq(&worker->lock);
		worker->task = NULL;
		spin_unlock_irq(&worker->lock);
		return 0;
	}

	work = NULL;
	spin_lock_irq(&worker->lock);
	if (!list_empty(&worker->work_list)) {
		work = list_first_entry(&worker->work_list,
					struct kthread_work, node);
		list_del_init(&work->node);
	}
	worker->current_work = work;
	spin_unlock_irq(&worker->lock);

	if (work) {
		__set_current_state(TASK_RUNNING);
		work->func(work);
	} else if (!freezing(current))
		schedule();

	try_to_freeze();
	goto repeat;
}
EXPORT_SYMBOL_GPL(kthread_worker_fn);

/* insert @work before @pos in @worker */
static void insert_kthread_work(struct kthread_worker *worker,
				struct kthread_work *work,
				struct list_head *pos)
{
	lockdep_assert_held(&worker->lock);

	list_add_tail(&work->node, pos);
	work->worker = worker;
	if (!worker->current_work && likely(worker->task))
		wake_up_process(worker->task);
}

/**
 * queue_kthread_work - queue a kthread_work
 * @worker: target kthread_worker
 * @work: kthread_work to queue
 *
 * Queue @work to work processor @worker for async execution.  @worker
 * must have been initialized with init_kthread_worker().  Returns %true
 * if @work was successfully queued, %false if it was already pending.
 */
bool queue_kthread_work(struct kthread_worker *worker,
			struct kthread_work *work)
{
	bool ret = false;
	unsigned long flags;

	spin_lock_irqsave(&worker->lock, flags);
	if (list_empty(&work->node)) {
		insert_kthread_work(worker, work, &worker->work_list);
		ret = true;
	}
	spin_unlock_irqrestore(&worker->lock, flags);
	return ret;
}
EXPORT_SYMBOL_GPL(queue_kthread_work);
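
/*
 * Illustrative usage sketch (not part of this file): a kthread_worker is
 * set up with init_kthread_worker(), given a thread that runs
 * kthread_worker_fn(), and then fed kthread_works via queue_kthread_work().
 * The identifiers my_worker, my_work, my_work_fn and worker_task are
 * hypothetical; the flush and stop calls refer to the helpers defined
 * below and in this file.
 *
 *	static struct kthread_worker my_worker;
 *	static struct kthread_work my_work;
 *	static struct task_struct *worker_task;
 *
 *	static void my_work_fn(struct kthread_work *work)
 *	{
 *		...
 *	}
 *
 *	init_kthread_worker(&my_worker);
 *	worker_task = kthread_run(kthread_worker_fn, &my_worker, "my_worker");
 *	if (IS_ERR(worker_task))
 *		return PTR_ERR(worker_task);
 *
 *	init_kthread_work(&my_work, my_work_fn);
 *	queue_kthread_work(&my_worker, &my_work);
 *	flush_kthread_work(&my_work);
 *	kthread_stop(worker_task);
 */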

struct kthread_flush_work {
	struct kthread_work	work;
	struct completion	done;
};

static void kthread_flush_work_fn(struct kthread_work *work)
{
	struct kthread_flush_work *fwork =
		container_of(work, struct kthread_flush_work, work);
	complete(&fwork->done);
}

/**
 * flush_kthread_work - flush a kthread_work
 * @work: work to flush
 *
 * If @work is queued or executing, wait for it to finish execution.
 */
void flush_kthread_work(struct kthread_work *work)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};
	struct kthread_worker *worker;
	bool noop = false;

retry:
	worker = work->worker;
	if (!worker)
		return;

	spin_lock_irq(&worker->lock);
	if (work->worker != worker) {
		spin_unlock_irq(&worker->lock);
		goto retry;
	}

	if (!list_empty(&work->node))
		insert_kthread_work(worker, &fwork.work, work->node.next);
	else if (worker->current_work == work)
		insert_kthread_work(worker, &fwork.work, worker->work_list.next);
	else
		noop = true;

	spin_unlock_irq(&worker->lock);

	if (!noop)
		wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_work);

/**
 * flush_kthread_worker - flush all current works on a kthread_worker
 * @worker: worker to flush
 *
 * Wait until all currently executing or pending works on @worker are
 * finished.
 */
void flush_kthread_worker(struct kthread_worker *worker)
{
	struct kthread_flush_work fwork = {
		KTHREAD_WORK_INIT(fwork.work, kthread_flush_work_fn),
		COMPLETION_INITIALIZER_ONSTACK(fwork.done),
	};

	queue_kthread_work(worker, &fwork.work);
	wait_for_completion(&fwork.done);
}
EXPORT_SYMBOL_GPL(flush_kthread_worker);