// SPDX-License-Identifier: GPL-2.0
  1. /*
  2. * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
  3. *
  4. * Provides a framework for enqueueing and running callbacks from hardirq
  5. * context. The enqueueing is NMI-safe.
  6. */
  7. #include <linux/bug.h>
  8. #include <linux/kernel.h>
  9. #include <linux/export.h>
  10. #include <linux/irq_work.h>
  11. #include <linux/percpu.h>
  12. #include <linux/hardirq.h>
  13. #include <linux/irqflags.h>
  14. #include <linux/sched.h>
  15. #include <linux/tick.h>
  16. #include <linux/cpu.h>
  17. #include <linux/notifier.h>
  18. #include <linux/smp.h>
  19. #include <asm/processor.h>
/*
 * Per-CPU pending-work queues: "raised" entries are run from the irq_work
 * interrupt (or the tick fallback), "lazy" entries from the next tick.
 */
static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
/*
 * Claim the entry so that no one else will poke at it.
 *
 * Returns true when we won the claim (the CLAIMED bits are now set and the
 * caller may enqueue the work); false when the work was already PENDING on
 * some queue, in which case the caller must not touch it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_CLAIMED;
		/* Try to install CLAIMED; oflags is the value actually seen. */
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		/* Someone else got PENDING in first — back off, claim failed. */
		if (oflags & IRQ_WORK_PENDING)
			return false;
		/* Lost a benign race (e.g. BUSY cleared); retry from what we saw. */
		flags = oflags;
		cpu_relax();
	}

	return true;
}
/*
 * Weak default for architectures without a dedicated irq_work interrupt:
 * do nothing here; queued work is picked up from irq_work_tick() instead.
 */
void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}
  51. /* Enqueue on current CPU, work must already be claimed and preempt disabled */
  52. static void __irq_work_queue_local(struct irq_work *work)
  53. {
  54. /* If the work is "lazy", handle it from next tick if any */
  55. if (work->flags & IRQ_WORK_LAZY) {
  56. if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
  57. tick_nohz_tick_stopped())
  58. arch_irq_work_raise();
  59. } else {
  60. if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
  61. arch_irq_work_raise();
  62. }
  63. }
  64. /* Enqueue the irq work @work on the current CPU */
  65. bool irq_work_queue(struct irq_work *work)
  66. {
  67. /* Only queue if not already pending */
  68. if (!irq_work_claim(work))
  69. return false;
  70. /* Queue the entry and raise the IPI if needed. */
  71. preempt_disable();
  72. __irq_work_queue_local(work);
  73. preempt_enable();
  74. return true;
  75. }
  76. EXPORT_SYMBOL_GPL(irq_work_queue);
  77. /*
  78. * Enqueue the irq_work @work on @cpu unless it's already pending
  79. * somewhere.
  80. *
  81. * Can be re-enqueued while the callback is still in progress.
  82. */
  83. bool irq_work_queue_on(struct irq_work *work, int cpu)
  84. {
  85. #ifndef CONFIG_SMP
  86. return irq_work_queue(work);
  87. #else /* CONFIG_SMP: */
  88. /* All work should have been flushed before going offline */
  89. WARN_ON_ONCE(cpu_is_offline(cpu));
  90. /* Only queue if not already pending */
  91. if (!irq_work_claim(work))
  92. return false;
  93. preempt_disable();
  94. if (cpu != smp_processor_id()) {
  95. /* Arch remote IPI send/receive backend aren't NMI safe */
  96. WARN_ON_ONCE(in_nmi());
  97. if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
  98. arch_send_call_function_single_ipi(cpu);
  99. } else {
  100. __irq_work_queue_local(work);
  101. }
  102. preempt_enable();
  103. return true;
  104. #endif /* CONFIG_SMP */
  105. }
  106. bool irq_work_needs_cpu(void)
  107. {
  108. struct llist_head *raised, *lazy;
  109. raised = this_cpu_ptr(&raised_list);
  110. lazy = this_cpu_ptr(&lazy_list);
  111. if (llist_empty(raised) || arch_irq_work_has_interrupt())
  112. if (llist_empty(lazy))
  113. return false;
  114. /* All work should have been flushed before going offline */
  115. WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));
  116. return true;
  117. }
/*
 * Run every callback queued on @list, transitioning each work item
 * PENDING|BUSY -> BUSY -> free as it goes.
 */
static void irq_work_run_list(struct llist_head *list)
{
	struct irq_work *work, *tmp;
	struct llist_node *llnode;
	unsigned long flags;

	/* Lists are only ever drained with interrupts disabled on this CPU. */
	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	/* Atomically detach the whole list; concurrent adds start a new one. */
	llnode = llist_del_all(list);
	llist_for_each_entry_safe(work, tmp, llnode, llnode) {
		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}
  145. /*
  146. * hotplug calls this through:
  147. * hotplug_cfd() -> flush_smp_call_function_queue()
  148. */
  149. void irq_work_run(void)
  150. {
  151. irq_work_run_list(this_cpu_ptr(&raised_list));
  152. irq_work_run_list(this_cpu_ptr(&lazy_list));
  153. }
  154. EXPORT_SYMBOL_GPL(irq_work_run);
  155. void irq_work_tick(void)
  156. {
  157. struct llist_head *raised = this_cpu_ptr(&raised_list);
  158. if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
  159. irq_work_run_list(raised);
  160. irq_work_run_list(this_cpu_ptr(&lazy_list));
  161. }
/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 *
 * Spins until irq_work_run_list() drops the BUSY bit, i.e. until the
 * callback has finished executing.
 */
void irq_work_sync(struct irq_work *work)
{
	/* Spinning here with IRQs off could deadlock against the handler. */
	lockdep_assert_irqs_enabled();

	/*
	 * NOTE(review): the re-read of work->flags each iteration relies on
	 * cpu_relax() acting as a compiler barrier — true for the kernel's
	 * cpu_relax(), but worth confirming on any new arch port.
	 */
	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);