irq_work.c

/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);
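
/*
 * Usage sketch (illustrative only, not part of this file): a caller embeds
 * a struct irq_work, binds it to a callback with init_irq_work(), and then
 * queues it from any context, including NMI context. The identifiers
 * example_func, example_work, example_setup and example_trigger below are
 * hypothetical.
 *
 *	static void example_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work callback ran in hardirq context\n");
 *	}
 *
 *	static struct irq_work example_work;
 *
 *	void example_setup(void)
 *	{
 *		init_irq_work(&example_work, example_func);
 *	}
 *
 *	void example_trigger(void)
 *	{
 *		// May be called from NMI; returns false if already pending.
 *		irq_work_queue(&example_work);
 *	}
 */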

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
	unsigned long flags, oflags, nflags;

	/*
	 * Start with our best wish as a premise but only trust any
	 * flag value after cmpxchg() result.
	 */
	flags = work->flags & ~IRQ_WORK_PENDING;
	for (;;) {
		nflags = flags | IRQ_WORK_FLAGS;
		oflags = cmpxchg(&work->flags, flags, nflags);
		if (oflags == flags)
			break;
		if (oflags & IRQ_WORK_PENDING)
			return false;
		flags = oflags;
		cpu_relax();
	}

	return true;
}
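
/*
 * Flag lifecycle implied by irq_work_claim() and irq_work_run_list().
 * IRQ_WORK_FLAGS is IRQ_WORK_PENDING | IRQ_WORK_BUSY (see <linux/irq_work.h>);
 * IRQ_WORK_LAZY, if set, is preserved throughout:
 *
 *	idle:     0                  nothing queued
 *	claimed:  PENDING | BUSY     after a successful irq_work_claim()
 *	running:  BUSY               PENDING cleared just before ->func()
 *	idle:     0                  BUSY cleared unless someone re-claimed it
 *
 * A claim attempt while PENDING is still set fails, so queueing the same
 * irq_work twice before it runs enqueues it only once.
 */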

void __weak arch_irq_work_raise(void)
{
	/*
	 * Lame architectures will get the timer tick callback
	 */
}

#ifdef CONFIG_SMP
/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(cpu));

	/* Arch remote IPI send/receive backend aren't NMI safe */
	WARN_ON_ONCE(in_nmi());

	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
		arch_send_call_function_single_ipi(cpu);

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue_on);
#endif

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
	/* Only queue if not already pending */
	if (!irq_work_claim(work))
		return false;

	/* Queue the entry and raise the IPI if needed. */
	preempt_disable();

	/* If the work is "lazy", handle it from next tick if any */
	if (work->flags & IRQ_WORK_LAZY) {
		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
		    tick_nohz_tick_stopped())
			arch_irq_work_raise();
	} else {
		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
			arch_irq_work_raise();
	}

	preempt_enable();

	return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
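
/*
 * Note on IRQ_WORK_LAZY (illustrative sketch, not part of this file): lazy
 * work lands on the per-cpu lazy_list and normally runs from the next timer
 * tick via irq_work_tick(); the self-IPI is only raised when the tick is
 * stopped (nohz). A caller that can tolerate tick-granularity latency may
 * set the bit before queueing; example_lazy_work and example_func below are
 * hypothetical:
 *
 *	static struct irq_work example_lazy_work = {
 *		.flags = IRQ_WORK_LAZY,
 *		.func  = example_func,
 *	};
 *
 *	irq_work_queue(&example_lazy_work);
 */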

bool irq_work_needs_cpu(void)
{
	struct llist_head *raised, *lazy;

	raised = this_cpu_ptr(&raised_list);
	lazy = this_cpu_ptr(&lazy_list);
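
	/*
	 * The tick is only needed if there is lazy work pending, or if
	 * raised work is pending on an architecture without a dedicated
	 * irq_work interrupt, which relies on irq_work_tick() instead.
	 */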
	if (llist_empty(raised) || arch_irq_work_has_interrupt())
		if (llist_empty(lazy))
			return false;

	/* All work should have been flushed before going offline */
	WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

	return true;
}

static void irq_work_run_list(struct llist_head *list)
{
	unsigned long flags;
	struct irq_work *work;
	struct llist_node *llnode;

	BUG_ON(!irqs_disabled());

	if (llist_empty(list))
		return;

	llnode = llist_del_all(list);
	while (llnode != NULL) {
		work = llist_entry(llnode, struct irq_work, llnode);

		llnode = llist_next(llnode);

		/*
		 * Clear the PENDING bit, after this point the @work
		 * can be re-used.
		 * Make it immediately visible so that other CPUs trying
		 * to claim that work don't rely on us to handle their data
		 * while we are in the middle of the func.
		 */
		flags = work->flags & ~IRQ_WORK_PENDING;
		xchg(&work->flags, flags);

		work->func(work);
		/*
		 * Clear the BUSY bit and return to the free state if
		 * no-one else claimed it meanwhile.
		 */
		(void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
	}
}

/*
 * hotplug calls this through:
 *  hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
	irq_work_run_list(this_cpu_ptr(&raised_list));
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
	struct llist_head *raised = this_cpu_ptr(&raised_list);

	if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
		irq_work_run_list(raised);
	irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @entry, ensures the entry is not
 * currently in use.
 */
void irq_work_sync(struct irq_work *work)
{
	WARN_ON_ONCE(irqs_disabled());

	while (work->flags & IRQ_WORK_BUSY)
		cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
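
/*
 * Typical use of irq_work_sync() (illustrative only, names hypothetical):
 * wait until a possibly pending or running callback has finished before the
 * irq_work's memory goes away. Must be called with interrupts enabled, since
 * it busy-waits on the BUSY bit.
 *
 *	void example_teardown(struct example_dev *dev)
 *	{
 *		irq_work_sync(&dev->work);
 *		kfree(dev);
 *	}
 */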