/*
 * Copyright (C) 2010 Red Hat, Inc., Peter Zijlstra
 *
 * Provides a framework for enqueueing and running callbacks from hardirq
 * context. The enqueueing is NMI-safe.
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/tick.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
#include <asm/processor.h>


static DEFINE_PER_CPU(struct llist_head, raised_list);
static DEFINE_PER_CPU(struct llist_head, lazy_list);

/*
 * Claim the entry so that no one else will poke at it.
 */
static bool irq_work_claim(struct irq_work *work)
{
        unsigned long flags, oflags, nflags;

        /*
         * Start from an optimistic guess at the current flags, but only
         * trust a flag value once the cmpxchg() result has confirmed it.
         */
        flags = work->flags & ~IRQ_WORK_PENDING;
        for (;;) {
                nflags = flags | IRQ_WORK_CLAIMED;
                oflags = cmpxchg(&work->flags, flags, nflags);
                if (oflags == flags)
                        break;
                if (oflags & IRQ_WORK_PENDING)
                        return false;
                flags = oflags;
                cpu_relax();
        }

        return true;
}
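
/*
 * Resulting work->flags life cycle (bit definitions live in
 * <linux/irq_work.h>):
 *
 *	0			free, irq_work_claim() succeeds
 *	IRQ_WORK_CLAIMED	(PENDING | BUSY) queued, not yet executing
 *	IRQ_WORK_BUSY		callback running; claiming again re-queues it
 *
 * A claim only fails while PENDING is set, i.e. while the work is queued
 * but its callback has not started yet.
 */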

void __weak arch_irq_work_raise(void)
{
        /*
         * Lame architectures will get the timer tick callback
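         * (their pending work is then run from irq_work_tick() on the
         * next tick).  Architectures with a dedicated irq_work interrupt
         * override this weak stub, typically by raising a self-interrupt.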
         */
}

/* Enqueue on current CPU, work must already be claimed and preempt disabled */
static void __irq_work_queue_local(struct irq_work *work)
{
        /* If the work is "lazy", handle it from next tick if any */
        if (work->flags & IRQ_WORK_LAZY) {
                if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
                    tick_nohz_tick_stopped())
                        arch_irq_work_raise();
        } else {
                if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
                        arch_irq_work_raise();
        }
}

/* Enqueue the irq work @work on the current CPU */
bool irq_work_queue(struct irq_work *work)
{
        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        /* Queue the entry and raise the IPI if needed. */
        preempt_disable();
        __irq_work_queue_local(work);
        preempt_enable();

        return true;
}
EXPORT_SYMBOL_GPL(irq_work_queue);
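
/*
 * Usage sketch ('wakeup_work' and its callback are hypothetical names used
 * only for illustration): a work item is initialized once and may then be
 * queued from any context that cannot sleep, including NMI.  The callback
 * later runs in hardirq context, from the irq_work interrupt or the tick.
 *
 *	static struct irq_work wakeup_work;
 *
 *	static void wakeup_work_func(struct irq_work *work)
 *	{
 *		pr_info("irq_work ran on CPU %d\n", smp_processor_id());
 *	}
 *
 *	// once, from normal context:
 *	init_irq_work(&wakeup_work, wakeup_work_func);
 *
 *	// later, from NMI/hardirq/atomic context:
 *	irq_work_queue(&wakeup_work);	// false if already pending
 */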

/*
 * Enqueue the irq_work @work on @cpu unless it's already pending
 * somewhere.
 *
 * Can be re-enqueued while the callback is still in progress.
 */
bool irq_work_queue_on(struct irq_work *work, int cpu)
{
#ifndef CONFIG_SMP
        return irq_work_queue(work);

#else /* CONFIG_SMP: */
        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(cpu));

        /* Only queue if not already pending */
        if (!irq_work_claim(work))
                return false;

        preempt_disable();
        if (cpu != smp_processor_id()) {
                /* Arch remote IPI send/receive backends aren't NMI safe */
                WARN_ON_ONCE(in_nmi());
                if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
                        arch_send_call_function_single_ipi(cpu);
        } else {
                __irq_work_queue_local(work);
        }
        preempt_enable();

        return true;
#endif /* CONFIG_SMP */
}
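
/*
 * Remote usage sketch ('wakeup_work' and 'target_cpu' are hypothetical,
 * as above): from non-NMI context the callback can be run on another CPU
 * instead of the local one:
 *
 *	if (!irq_work_queue_on(&wakeup_work, target_cpu))
 *		pr_debug("work was already pending somewhere\n");
 */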

bool irq_work_needs_cpu(void)
{
        struct llist_head *raised, *lazy;

        raised = this_cpu_ptr(&raised_list);
        lazy = this_cpu_ptr(&lazy_list);
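
        /*
         * The raised list only needs the tick when the architecture has
         * no dedicated irq_work interrupt; the lazy list is always run
         * from the tick.
         */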
        if (llist_empty(raised) || arch_irq_work_has_interrupt())
                if (llist_empty(lazy))
                        return false;

        /* All work should have been flushed before going offline */
        WARN_ON_ONCE(cpu_is_offline(smp_processor_id()));

        return true;
}

static void irq_work_run_list(struct llist_head *list)
{
        struct irq_work *work, *tmp;
        struct llist_node *llnode;
        unsigned long flags;

        BUG_ON(!irqs_disabled());

        if (llist_empty(list))
                return;

        llnode = llist_del_all(list);
        llist_for_each_entry_safe(work, tmp, llnode, llnode) {
                /*
                 * Clear the PENDING bit, after this point the @work
                 * can be re-used.
                 * Make it immediately visible so that other CPUs trying
                 * to claim that work don't rely on us to handle their data
                 * while we are in the middle of the func.
                 */
                flags = work->flags & ~IRQ_WORK_PENDING;
                xchg(&work->flags, flags);

                work->func(work);
                /*
                 * Clear the BUSY bit and return to the free state if
                 * no-one else claimed it meanwhile.
                 */
                (void)cmpxchg(&work->flags, flags, flags & ~IRQ_WORK_BUSY);
        }
}

/*
 * hotplug calls this through:
 * hotplug_cfd() -> flush_smp_call_function_queue()
 */
void irq_work_run(void)
{
        irq_work_run_list(this_cpu_ptr(&raised_list));
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}
EXPORT_SYMBOL_GPL(irq_work_run);

void irq_work_tick(void)
{
        struct llist_head *raised = this_cpu_ptr(&raised_list);

        if (!llist_empty(raised) && !arch_irq_work_has_interrupt())
                irq_work_run_list(raised);
        irq_work_run_list(this_cpu_ptr(&lazy_list));
}

/*
 * Synchronize against the irq_work @work: busy-wait until the work is no
 * longer in use (neither pending nor executing).
 */
void irq_work_sync(struct irq_work *work)
{
        lockdep_assert_irqs_enabled();

        while (work->flags & IRQ_WORK_BUSY)
                cpu_relax();
}
EXPORT_SYMBOL_GPL(irq_work_sync);
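
/*
 * Teardown sketch ('wakeup_work' is the hypothetical item from the
 * sketches above): before a dynamically allocated irq_work is freed, or
 * its module unloaded, wait for any queued instance to finish.  This may
 * spin, so it must be called with interrupts enabled:
 *
 *	irq_work_sync(&wakeup_work);
 *	// now safe to free or re-initialize the work item
 */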