- /*
- * Generic helpers for smp ipi calls
- *
- * (C) Jens Axboe <jens.axboe@oracle.com> 2008
- */
- #include <linux/irq_work.h>
- #include <linux/rcupdate.h>
- #include <linux/rculist.h>
- #include <linux/kernel.h>
- #include <linux/export.h>
- #include <linux/percpu.h>
- #include <linux/init.h>
- #include <linux/gfp.h>
- #include <linux/smp.h>
- #include <linux/cpu.h>
- #include <linux/sched.h>
- #include <linux/hypervisor.h>
- #include "smpboot.h"
- enum {
- CSD_FLAG_LOCK = 0x01,
- CSD_FLAG_SYNCHRONOUS = 0x02,
- };
- struct call_function_data {
- struct call_single_data __percpu *csd;
- cpumask_var_t cpumask;
- };
- static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
- static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
- static void flush_smp_call_function_queue(bool warn_cpu_offline);
- int smpcfd_prepare_cpu(unsigned int cpu)
- {
- struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
- if (!zalloc_cpumask_var_node(&cfd->cpumask, GFP_KERNEL,
- cpu_to_node(cpu)))
- return -ENOMEM;
- cfd->csd = alloc_percpu(struct call_single_data);
- if (!cfd->csd) {
- free_cpumask_var(cfd->cpumask);
- return -ENOMEM;
- }
- return 0;
- }
- int smpcfd_dead_cpu(unsigned int cpu)
- {
- struct call_function_data *cfd = &per_cpu(cfd_data, cpu);
- free_cpumask_var(cfd->cpumask);
- free_percpu(cfd->csd);
- return 0;
- }
- int smpcfd_dying_cpu(unsigned int cpu)
- {
- /*
- * The IPIs for the smp-call-function callbacks queued by other
- * CPUs might arrive late, either due to hardware latencies or
- * because this CPU disabled interrupts (inside stop-machine)
- * before the IPIs were sent. So flush out any pending callbacks
- * explicitly (without waiting for the IPIs to arrive), to
- * ensure that the outgoing CPU doesn't go offline with work
- * still pending.
- */
- flush_smp_call_function_queue(false);
- return 0;
- }
- void __init call_function_init(void)
- {
- int i;
- for_each_possible_cpu(i)
- init_llist_head(&per_cpu(call_single_queue, i));
- smpcfd_prepare_cpu(smp_processor_id());
- }
- /*
- * csd_lock/csd_unlock used to serialize access to per-cpu csd resources
- *
- * For non-synchronous ipi calls the csd can still be in use by the
- * previous function call. For multi-cpu calls it's even more interesting
- * as we'll have to ensure no other cpu is observing our csd.
- */
- static __always_inline void csd_lock_wait(struct call_single_data *csd)
- {
- smp_cond_load_acquire(&csd->flags, !(VAL & CSD_FLAG_LOCK));
- }
- static __always_inline void csd_lock(struct call_single_data *csd)
- {
- csd_lock_wait(csd);
- csd->flags |= CSD_FLAG_LOCK;
- /*
- * prevent CPU from reordering the above assignment
- * to ->flags with any subsequent assignments to other
- * fields of the specified call_single_data structure:
- */
- smp_wmb();
- }
- static __always_inline void csd_unlock(struct call_single_data *csd)
- {
- WARN_ON(!(csd->flags & CSD_FLAG_LOCK));
- /*
- * ensure we're all done before releasing data:
- */
- smp_store_release(&csd->flags, 0);
- }
- static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_single_data, csd_data);
- /*
- * Insert a previously allocated call_single_data element
- * for execution on the given CPU. The csd must already be
- * locked by the caller (CSD_FLAG_LOCK set in ->flags); ->func
- * and ->info are filled in here.
- */
- static int generic_exec_single(int cpu, struct call_single_data *csd,
- smp_call_func_t func, void *info)
- {
- if (cpu == smp_processor_id()) {
- unsigned long flags;
- /*
- * We can unlock early even for the synchronous on-stack case,
- * since we're doing this from the same CPU..
- */
- csd_unlock(csd);
- local_irq_save(flags);
- func(info);
- local_irq_restore(flags);
- return 0;
- }
- if ((unsigned)cpu >= nr_cpu_ids || !cpu_online(cpu)) {
- csd_unlock(csd);
- return -ENXIO;
- }
- csd->func = func;
- csd->info = info;
- /*
- * The list addition should be visible before the IPI is sent; the
- * handler locks the list to pull the entry off it, so the normal
- * cache coherency rules implied by spinlocks cover this.
- *
- * If IPIs can go out of order with respect to the cache coherency
- * protocol on an architecture, sufficient synchronisation should be
- * added to arch code to make it appear to obey cache coherency WRT
- * locking and barrier primitives. Generic code isn't really
- * equipped to do the right thing...
- */
- if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
- arch_send_call_function_single_ipi(cpu);
- return 0;
- }
- /**
- * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
- *
- * Invoked by arch to handle an IPI for call function single.
- * Must be called with interrupts disabled.
- */
- void generic_smp_call_function_single_interrupt(void)
- {
- flush_smp_call_function_queue(true);
- }
- /**
- * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
- *
- * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
- * offline CPU. Skip this check if set to 'false'.
- *
- * Flush any pending smp-call-function callbacks queued on this CPU. This is
- * invoked by the generic IPI handler, as well as by a CPU about to go offline,
- * to ensure that all pending IPI callbacks are run before it goes completely
- * offline.
- *
- * Loop through the call_single_queue and run all the queued callbacks.
- * Must be called with interrupts disabled.
- */
- static void flush_smp_call_function_queue(bool warn_cpu_offline)
- {
- struct llist_head *head;
- struct llist_node *entry;
- struct call_single_data *csd, *csd_next;
- static bool warned;
- WARN_ON(!irqs_disabled());
- head = this_cpu_ptr(&call_single_queue);
- entry = llist_del_all(head);
- entry = llist_reverse_order(entry);
- /* There shouldn't be any pending callbacks on an offline CPU. */
- if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
- !warned && !llist_empty(head))) {
- warned = true;
- WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
- /*
- * We don't have to use the _safe() variant here
- * because we are not invoking the IPI handlers yet.
- */
- llist_for_each_entry(csd, entry, llist)
- pr_warn("IPI callback %pS sent to offline CPU\n",
- csd->func);
- }
- llist_for_each_entry_safe(csd, csd_next, entry, llist) {
- smp_call_func_t func = csd->func;
- void *info = csd->info;
- /* Do we wait until *after* callback? */
- if (csd->flags & CSD_FLAG_SYNCHRONOUS) {
- func(info);
- csd_unlock(csd);
- } else {
- csd_unlock(csd);
- func(info);
- }
- }
- /*
- * Handle irq works queued remotely by irq_work_queue_on().
- * Smp functions above are typically synchronous, so they
- * should run first since some other CPUs may be busy waiting
- * for them.
- */
- irq_work_run();
- }
- /*
- * smp_call_function_single - Run a function on a specific CPU
- * @cpu: The CPU to run on.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait until function has completed on other CPUs.
- *
- * Returns 0 on success, else a negative status code.
- */
- int smp_call_function_single(int cpu, smp_call_func_t func, void *info,
- int wait)
- {
- struct call_single_data *csd;
- struct call_single_data csd_stack = { .flags = CSD_FLAG_LOCK | CSD_FLAG_SYNCHRONOUS };
- int this_cpu;
- int err;
- /*
- * prevent preemption and reschedule on another processor,
- * as well as CPU removal
- */
- this_cpu = get_cpu();
- /*
- * Can deadlock when called with interrupts disabled.
- * We allow CPUs that are not yet online though, as no one else can
- * send smp call function interrupt to this cpu and as such deadlocks
- * can't happen.
- */
- WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
- && !oops_in_progress);
- csd = &csd_stack;
- if (!wait) {
- csd = this_cpu_ptr(&csd_data);
- csd_lock(csd);
- }
- err = generic_exec_single(cpu, csd, func, info);
- if (wait)
- csd_lock_wait(csd);
- put_cpu();
- return err;
- }
- EXPORT_SYMBOL(smp_call_function_single);
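/*
 * Illustrative sketch, not part of kernel/smp.c: a minimal caller of
 * smp_call_function_single() from a hypothetical module.  The "example_"
 * names below are assumptions for illustration only.  The callback runs
 * in IPI (hard-interrupt) context on the target CPU, so it must be fast,
 * non-blocking and must not sleep.
 */
#include <linux/smp.h>
#include <linux/atomic.h>

static void example_bump_counter(void *info)
{
        atomic_inc((atomic_t *)info);   /* runs on the target CPU, IRQs off */
}

static int example_count_on_cpu(int cpu)
{
        atomic_t hits = ATOMIC_INIT(0);
        int err;

        /* wait=1: only return once the remote CPU has run the callback */
        err = smp_call_function_single(cpu, example_bump_counter, &hits, 1);
        return err ? err : atomic_read(&hits);
}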
- /**
- * smp_call_function_single_async(): Run an asynchronous function on a
- * specific CPU.
- * @cpu: The CPU to run on.
- * @csd: Pre-allocated and setup data structure
- *
- * Like smp_call_function_single(), but the call is asynchronous and
- * can thus be done from contexts with disabled interrupts.
- *
- * The caller passes their own pre-allocated data structure
- * (i.e. embedded in an object) and is responsible for synchronizing it
- * such that the IPIs performed on the @csd are strictly serialized.
- *
- * NOTE: Be careful, there is unfortunately no current debugging facility to
- * validate the correctness of this serialization.
- */
- int smp_call_function_single_async(int cpu, struct call_single_data *csd)
- {
- int err = 0;
- preempt_disable();
- /* We could deadlock if we have to wait here with interrupts disabled! */
- if (WARN_ON_ONCE(csd->flags & CSD_FLAG_LOCK))
- csd_lock_wait(csd);
- csd->flags = CSD_FLAG_LOCK;
- smp_wmb();
- err = generic_exec_single(cpu, csd, csd->func, csd->info);
- preempt_enable();
- return err;
- }
- EXPORT_SYMBOL_GPL(smp_call_function_single_async);
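/*
 * Illustrative sketch, not part of kernel/smp.c: using the async variant
 * with a caller-owned csd embedded in an object, as the comment above
 * requires.  "example_dev" and its fields are hypothetical; the caller
 * must serialize reuse of the csd itself, here with a simple flag.
 */
#include <linux/smp.h>
#include <linux/errno.h>

struct example_dev {
        struct call_single_data csd;    /* pre-allocated, lives as long as the object */
        int pending;                    /* caller-side serialization of csd reuse */
};

static void example_dev_poke(void *info)
{
        struct example_dev *dev = info;

        /* runs on the remote CPU in IPI context */
        WRITE_ONCE(dev->pending, 0);
}

static int example_dev_kick(struct example_dev *dev, int cpu)
{
        if (READ_ONCE(dev->pending))
                return -EBUSY;          /* previous call not finished yet */

        WRITE_ONCE(dev->pending, 1);
        dev->csd.func = example_dev_poke;
        dev->csd.info = dev;
        return smp_call_function_single_async(cpu, &dev->csd);
}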
- /*
- * smp_call_function_any - Run a function on any of the given cpus
- * @mask: The mask of cpus it can run on.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait until function has completed.
- *
- * Returns 0 on success, else a negative status code (if no cpus were online).
- *
- * Selection preference:
- * 1) current cpu if in @mask
- * 2) any cpu of current node if in @mask
- * 3) any other online cpu in @mask
- */
- int smp_call_function_any(const struct cpumask *mask,
- smp_call_func_t func, void *info, int wait)
- {
- unsigned int cpu;
- const struct cpumask *nodemask;
- int ret;
- /* Try for same CPU (cheapest) */
- cpu = get_cpu();
- if (cpumask_test_cpu(cpu, mask))
- goto call;
- /* Try for same node. */
- nodemask = cpumask_of_node(cpu_to_node(cpu));
- for (cpu = cpumask_first_and(nodemask, mask); cpu < nr_cpu_ids;
- cpu = cpumask_next_and(cpu, nodemask, mask)) {
- if (cpu_online(cpu))
- goto call;
- }
- /* Any online will do: smp_call_function_single handles nr_cpu_ids. */
- cpu = cpumask_any_and(mask, cpu_online_mask);
- call:
- ret = smp_call_function_single(cpu, func, info, wait);
- put_cpu();
- return ret;
- }
- EXPORT_SYMBOL_GPL(smp_call_function_any);
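/*
 * Illustrative sketch, not part of kernel/smp.c: smp_call_function_any()
 * picks the cheapest CPU in @mask (self, then same node, then any online
 * CPU), which suits "run this somewhere in that group" callers such as a
 * hypothetical per-package flush.  All names below are assumptions.
 */
#include <linux/smp.h>
#include <linux/cpumask.h>

static void example_flush(void *info)
{
        /* fast, non-blocking work on whichever CPU was chosen */
}

static int example_flush_on_package(const struct cpumask *package_mask)
{
        /* wait=1: returns only after example_flush() has run */
        return smp_call_function_any(package_mask, example_flush, NULL, 1);
}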
- /**
- * smp_call_function_many(): Run a function on a set of other CPUs.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- * on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. Preemption
- * must be disabled when calling this function.
- */
- void smp_call_function_many(const struct cpumask *mask,
- smp_call_func_t func, void *info, bool wait)
- {
- struct call_function_data *cfd;
- int cpu, next_cpu, this_cpu = smp_processor_id();
- /*
- * Can deadlock when called with interrupts disabled.
- * We allow CPUs that are not yet online though, as no one else can
- * send smp call function interrupt to this cpu and as such deadlocks
- * can't happen.
- */
- WARN_ON_ONCE(cpu_online(this_cpu) && irqs_disabled()
- && !oops_in_progress && !early_boot_irqs_disabled);
- /* Try to fastpath. So, what's a CPU they want? Ignoring this one. */
- cpu = cpumask_first_and(mask, cpu_online_mask);
- if (cpu == this_cpu)
- cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
- /* No online cpus? We're done. */
- if (cpu >= nr_cpu_ids)
- return;
- /* Do we have another CPU which isn't us? */
- next_cpu = cpumask_next_and(cpu, mask, cpu_online_mask);
- if (next_cpu == this_cpu)
- next_cpu = cpumask_next_and(next_cpu, mask, cpu_online_mask);
- /* Fastpath: do that cpu by itself. */
- if (next_cpu >= nr_cpu_ids) {
- smp_call_function_single(cpu, func, info, wait);
- return;
- }
- cfd = this_cpu_ptr(&cfd_data);
- cpumask_and(cfd->cpumask, mask, cpu_online_mask);
- cpumask_clear_cpu(this_cpu, cfd->cpumask);
- /* Some callers race with other cpus changing the passed mask */
- if (unlikely(!cpumask_weight(cfd->cpumask)))
- return;
- for_each_cpu(cpu, cfd->cpumask) {
- struct call_single_data *csd = per_cpu_ptr(cfd->csd, cpu);
- csd_lock(csd);
- if (wait)
- csd->flags |= CSD_FLAG_SYNCHRONOUS;
- csd->func = func;
- csd->info = info;
- llist_add(&csd->llist, &per_cpu(call_single_queue, cpu));
- }
- /* Send a message to all CPUs in the map */
- arch_send_call_function_ipi_mask(cfd->cpumask);
- if (wait) {
- for_each_cpu(cpu, cfd->cpumask) {
- struct call_single_data *csd;
- csd = per_cpu_ptr(cfd->csd, cpu);
- csd_lock_wait(csd);
- }
- }
- }
- EXPORT_SYMBOL(smp_call_function_many);
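/*
 * Illustrative sketch, not part of kernel/smp.c: smp_call_function_many()
 * never runs @func on the calling CPU and requires preemption to be
 * disabled by the caller, so a typical pattern covers the local CPU by
 * hand.  "example_sync" is a hypothetical callback.
 */
#include <linux/smp.h>
#include <linux/cpumask.h>

static void example_sync(void *info)
{
        /* fast, non-blocking; runs with interrupts disabled on remote CPUs */
}

static void example_sync_all_but_self(void)
{
        preempt_disable();
        smp_call_function_many(cpu_online_mask, example_sync, NULL, true);
        /* the local CPU was skipped above; run the callback here as well */
        example_sync(NULL);
        preempt_enable();
}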
- /**
- * smp_call_function(): Run a function on all other CPUs.
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- * on other CPUs.
- *
- * Returns 0.
- *
- * If @wait is true, then returns once @func has returned; otherwise
- * it returns just before the target cpu calls @func.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler.
- */
- int smp_call_function(smp_call_func_t func, void *info, int wait)
- {
- preempt_disable();
- smp_call_function_many(cpu_online_mask, func, info, wait);
- preempt_enable();
- return 0;
- }
- EXPORT_SYMBOL(smp_call_function);
- /* Setup configured maximum number of CPUs to activate */
- unsigned int setup_max_cpus = NR_CPUS;
- EXPORT_SYMBOL(setup_max_cpus);
- /*
- * Setup routine for controlling SMP activation
- *
- * Command-line option of "nosmp" or "maxcpus=0" will disable SMP
- * activation entirely (the MPS table probe still happens, though).
- *
- * Command-line option of "maxcpus=<NUM>", where <NUM> is an integer
- * greater than 0, limits the maximum number of CPUs activated in
- * SMP mode to <NUM>.
- */
- void __weak arch_disable_smp_support(void) { }
- static int __init nosmp(char *str)
- {
- setup_max_cpus = 0;
- arch_disable_smp_support();
- return 0;
- }
- early_param("nosmp", nosmp);
- /* this is the hard limit */
- static int __init nrcpus(char *str)
- {
- int nr_cpus;
- get_option(&str, &nr_cpus);
- if (nr_cpus > 0 && nr_cpus < nr_cpu_ids)
- nr_cpu_ids = nr_cpus;
- return 0;
- }
- early_param("nr_cpus", nrcpus);
- static int __init maxcpus(char *str)
- {
- get_option(&str, &setup_max_cpus);
- if (setup_max_cpus == 0)
- arch_disable_smp_support();
- return 0;
- }
- early_param("maxcpus", maxcpus);
- /* Setup number of possible processor ids */
- int nr_cpu_ids __read_mostly = NR_CPUS;
- EXPORT_SYMBOL(nr_cpu_ids);
- /* An arch may set nr_cpu_ids earlier if needed, so this would be redundant */
- void __init setup_nr_cpu_ids(void)
- {
- nr_cpu_ids = find_last_bit(cpumask_bits(cpu_possible_mask), NR_CPUS) + 1;
- }
- void __weak smp_announce(void)
- {
- printk(KERN_INFO "Brought up %d CPUs\n", num_online_cpus());
- }
- /* Called by boot processor to activate the rest. */
- void __init smp_init(void)
- {
- unsigned int cpu;
- idle_threads_init();
- cpuhp_threads_init();
- /* FIXME: This should be done in userspace --RR */
- for_each_present_cpu(cpu) {
- if (num_online_cpus() >= setup_max_cpus)
- break;
- if (!cpu_online(cpu))
- cpu_up(cpu);
- }
- /* Any cleanup work */
- smp_announce();
- smp_cpus_done(setup_max_cpus);
- }
- /*
- * Call a function on all processors. May be used during early boot while
- * early_boot_irqs_disabled is set. Use local_irq_save/restore() instead
- * of local_irq_disable/enable().
- */
- int on_each_cpu(void (*func) (void *info), void *info, int wait)
- {
- unsigned long flags;
- int ret = 0;
- preempt_disable();
- ret = smp_call_function(func, info, wait);
- local_irq_save(flags);
- func(info);
- local_irq_restore(flags);
- preempt_enable();
- return ret;
- }
- EXPORT_SYMBOL(on_each_cpu);
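/*
 * Illustrative sketch, not part of kernel/smp.c: on_each_cpu() runs the
 * callback on every online CPU, including the caller (with interrupts
 * disabled locally).  The per-cpu counter below is a hypothetical example.
 */
#include <linux/smp.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, example_events);

static void example_count_event(void *info)
{
        this_cpu_inc(example_events);
}

static void example_count_everywhere(void)
{
        /* wait=1: all CPUs have incremented their counter on return */
        on_each_cpu(example_count_event, NULL, 1);
}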
- /**
- * on_each_cpu_mask(): Run a function on processors specified by
- * cpumask, which may include the local processor.
- * @mask: The set of cpus to run on (only runs on online subset).
- * @func: The function to run. This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to the function.
- * @wait: If true, wait (atomically) until function has completed
- * on other CPUs.
- *
- * If @wait is true, then returns once @func has returned.
- *
- * You must not call this function with disabled interrupts or from a
- * hardware interrupt handler or from a bottom half handler. The
- * exception is that it may be used during early boot while
- * early_boot_irqs_disabled is set.
- */
- void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
- void *info, bool wait)
- {
- int cpu = get_cpu();
- smp_call_function_many(mask, func, info, wait);
- if (cpumask_test_cpu(cpu, mask)) {
- unsigned long flags;
- local_irq_save(flags);
- func(info);
- local_irq_restore(flags);
- }
- put_cpu();
- }
- EXPORT_SYMBOL(on_each_cpu_mask);
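/*
 * Illustrative sketch, not part of kernel/smp.c: on_each_cpu_mask() is the
 * masked variant; unlike smp_call_function_many() it also covers the local
 * CPU when it is in @mask.  "example_drain" and the per-device cpumask are
 * hypothetical.
 */
#include <linux/smp.h>
#include <linux/cpumask.h>

static void example_drain(void *info)
{
        /* drain a per-cpu queue belonging to the device passed in @info */
}

static void example_drain_device(void *dev, const struct cpumask *dev_cpus)
{
        on_each_cpu_mask(dev_cpus, example_drain, dev, true);
}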
- /*
- * on_each_cpu_cond(): Call a function on each processor for which
- * the supplied function cond_func returns true, optionally waiting
- * for all the required CPUs to finish. This may include the local
- * processor.
- * @cond_func: A callback function that is passed a cpu id and
- * the info parameter. The function is called
- * with preemption disabled. The function should
- * return a boolean value indicating whether to IPI
- * the specified CPU.
- * @func: The function to run on all applicable CPUs.
- * This must be fast and non-blocking.
- * @info: An arbitrary pointer to pass to both functions.
- * @wait: If true, wait (atomically) until function has
- * completed on other CPUs.
- * @gfp_flags: GFP flags to use when allocating the cpumask
- * used internally by the function.
- *
- * The function might sleep if the GFP flags indicate a
- * non-atomic allocation is allowed.
- *
- * Preemption is disabled to protect against CPUs going offline, but it
- * does not protect against CPUs coming online. CPUs going online during
- * the call will not be seen or sent an IPI.
- *
- * You must not call this function with disabled interrupts or
- * from a hardware interrupt handler or from a bottom half handler.
- */
- void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
- smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags)
- {
- cpumask_var_t cpus;
- int cpu, ret;
- might_sleep_if(gfpflags_allow_blocking(gfp_flags));
- if (likely(zalloc_cpumask_var(&cpus, (gfp_flags|__GFP_NOWARN)))) {
- preempt_disable();
- for_each_online_cpu(cpu)
- if (cond_func(cpu, info))
- cpumask_set_cpu(cpu, cpus);
- on_each_cpu_mask(cpus, func, info, wait);
- preempt_enable();
- free_cpumask_var(cpus);
- } else {
- /*
- * No free cpumask, bother. No matter, we'll
- * just have to IPI them one by one.
- */
- preempt_disable();
- for_each_online_cpu(cpu)
- if (cond_func(cpu, info)) {
- ret = smp_call_function_single(cpu, func,
- info, wait);
- WARN_ON_ONCE(ret);
- }
- preempt_enable();
- }
- }
- EXPORT_SYMBOL(on_each_cpu_cond);
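/*
 * Illustrative sketch, not part of kernel/smp.c: on_each_cpu_cond() lets a
 * predicate filter the IPI targets.  Here a hypothetical per-cpu "dirty"
 * flag decides which CPUs need the (equally hypothetical) flush callback.
 */
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/gfp.h>

static DEFINE_PER_CPU(bool, example_dirty);

static bool example_cpu_is_dirty(int cpu, void *info)
{
        return per_cpu(example_dirty, cpu);
}

static void example_flush_cpu(void *info)
{
        this_cpu_write(example_dirty, false);
}

static void example_flush_dirty_cpus(void)
{
        /* GFP_KERNEL: may sleep to allocate the internal cpumask */
        on_each_cpu_cond(example_cpu_is_dirty, example_flush_cpu, NULL,
                         true, GFP_KERNEL);
}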
- static void do_nothing(void *unused)
- {
- }
- /**
- * kick_all_cpus_sync - Force all cpus out of idle
- *
- * Used to synchronize the update of pm_idle function pointer. It's
- * called after the pointer is updated and returns after the dummy
- * callback function has been executed on all cpus. The execution of
- * the function can only happen on the remote cpus after they have
- * left the idle function which had been called via pm_idle function
- * pointer. So it's guaranteed that nothing uses the previous pointer
- * anymore.
- */
- void kick_all_cpus_sync(void)
- {
- /* Make sure the change is visible before we kick the cpus */
- smp_mb();
- smp_call_function(do_nothing, NULL, 1);
- }
- EXPORT_SYMBOL_GPL(kick_all_cpus_sync);
- /**
- * wake_up_all_idle_cpus - break all cpus out of idle
- * wake_up_all_idle_cpus tries to break all CPUs out of the idle state,
- * including CPUs that are idle-polling. Non-idle CPUs are left alone.
- */
- void wake_up_all_idle_cpus(void)
- {
- int cpu;
- preempt_disable();
- for_each_online_cpu(cpu) {
- if (cpu == smp_processor_id())
- continue;
- wake_up_if_idle(cpu);
- }
- preempt_enable();
- }
- EXPORT_SYMBOL_GPL(wake_up_all_idle_cpus);
- /**
- * smp_call_on_cpu - Call a function on a specific cpu
- *
- * Used to call a function on a specific cpu and wait for it to return.
- * Optionally make sure the call is done on a specified physical cpu via vcpu
- * pinning in order to support virtualized environments.
- */
- struct smp_call_on_cpu_struct {
- struct work_struct work;
- struct completion done;
- int (*func)(void *);
- void *data;
- int ret;
- int cpu;
- };
- static void smp_call_on_cpu_callback(struct work_struct *work)
- {
- struct smp_call_on_cpu_struct *sscs;
- sscs = container_of(work, struct smp_call_on_cpu_struct, work);
- if (sscs->cpu >= 0)
- hypervisor_pin_vcpu(sscs->cpu);
- sscs->ret = sscs->func(sscs->data);
- if (sscs->cpu >= 0)
- hypervisor_pin_vcpu(-1);
- complete(&sscs->done);
- }
- int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
- {
- struct smp_call_on_cpu_struct sscs = {
- .done = COMPLETION_INITIALIZER_ONSTACK(sscs.done),
- .func = func,
- .data = par,
- .cpu = phys ? cpu : -1,
- };
- INIT_WORK_ONSTACK(&sscs.work, smp_call_on_cpu_callback);
- if (cpu >= nr_cpu_ids || !cpu_online(cpu))
- return -ENXIO;
- queue_work_on(cpu, system_wq, &sscs.work);
- wait_for_completion(&sscs.done);
- return sscs.ret;
- }
- EXPORT_SYMBOL_GPL(smp_call_on_cpu);
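/*
 * Illustrative sketch, not part of kernel/smp.c: unlike the IPI-based
 * helpers above, smp_call_on_cpu() runs the function from a workqueue on
 * the target CPU, so it may sleep.  "example_slow_read" is a hypothetical
 * callback; phys=false means no vCPU pinning is requested.
 */
#include <linux/smp.h>

static int example_slow_read(void *arg)
{
        /* process context on the chosen CPU: sleeping is allowed here */
        return 0;
}

static int example_read_on(unsigned int cpu)
{
        return smp_call_on_cpu(cpu, example_slow_read, NULL, false);
}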
|