/*
 * swait.c - simple (single-wakeup) wait queue primitives.
 */
#include <linux/sched.h>
#include <linux/swait.h>
  3. void __init_swait_queue_head(struct swait_queue_head *q, const char *name,
  4. struct lock_class_key *key)
  5. {
  6. raw_spin_lock_init(&q->lock);
  7. lockdep_set_class_and_name(&q->lock, key, name);
  8. INIT_LIST_HEAD(&q->task_list);
  9. }
  10. EXPORT_SYMBOL(__init_swait_queue_head);
  11. /*
  12. * The thing about the wake_up_state() return value; I think we can ignore it.
  13. *
  14. * If for some reason it would return 0, that means the previously waiting
  15. * task is already running, so it will observe condition true (or has already).
  16. */
  17. void swake_up_locked(struct swait_queue_head *q)
  18. {
  19. struct swait_queue *curr;
  20. if (list_empty(&q->task_list))
  21. return;
  22. curr = list_first_entry(&q->task_list, typeof(*curr), task_list);
  23. wake_up_process(curr->task);
  24. list_del_init(&curr->task_list);
  25. }
  26. EXPORT_SYMBOL(swake_up_locked);
  27. void swake_up(struct swait_queue_head *q)
  28. {
  29. unsigned long flags;
  30. raw_spin_lock_irqsave(&q->lock, flags);
  31. swake_up_locked(q);
  32. raw_spin_unlock_irqrestore(&q->lock, flags);
  33. }
  34. EXPORT_SYMBOL(swake_up);
  35. /*
  36. * Does not allow usage from IRQ disabled, since we must be able to
  37. * release IRQs to guarantee bounded hold time.
  38. */
  39. void swake_up_all(struct swait_queue_head *q)
  40. {
  41. struct swait_queue *curr;
  42. LIST_HEAD(tmp);
  43. raw_spin_lock_irq(&q->lock);
  44. list_splice_init(&q->task_list, &tmp);
  45. while (!list_empty(&tmp)) {
  46. curr = list_first_entry(&tmp, typeof(*curr), task_list);
  47. wake_up_state(curr->task, TASK_NORMAL);
  48. list_del_init(&curr->task_list);
  49. if (list_empty(&tmp))
  50. break;
  51. raw_spin_unlock_irq(&q->lock);
  52. raw_spin_lock_irq(&q->lock);
  53. }
  54. raw_spin_unlock_irq(&q->lock);
  55. }
  56. EXPORT_SYMBOL(swake_up_all);
  57. void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait)
  58. {
  59. wait->task = current;
  60. if (list_empty(&wait->task_list))
  61. list_add(&wait->task_list, &q->task_list);
  62. }
  63. void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state)
  64. {
  65. unsigned long flags;
  66. raw_spin_lock_irqsave(&q->lock, flags);
  67. __prepare_to_swait(q, wait);
  68. set_current_state(state);
  69. raw_spin_unlock_irqrestore(&q->lock, flags);
  70. }
  71. EXPORT_SYMBOL(prepare_to_swait);
  72. long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state)
  73. {
  74. if (signal_pending_state(state, current))
  75. return -ERESTARTSYS;
  76. prepare_to_swait(q, wait, state);
  77. return 0;
  78. }
  79. EXPORT_SYMBOL(prepare_to_swait_event);
  80. void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
  81. {
  82. __set_current_state(TASK_RUNNING);
  83. if (!list_empty(&wait->task_list))
  84. list_del_init(&wait->task_list);
  85. }
  86. void finish_swait(struct swait_queue_head *q, struct swait_queue *wait)
  87. {
  88. unsigned long flags;
  89. __set_current_state(TASK_RUNNING);
  90. if (!list_empty_careful(&wait->task_list)) {
  91. raw_spin_lock_irqsave(&q->lock, flags);
  92. list_del_init(&wait->task_list);
  93. raw_spin_unlock_irqrestore(&q->lock, flags);
  94. }
  95. }
  96. EXPORT_SYMBOL(finish_swait);