smp.c

/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2.  This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/irq.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

static void (*smp_cross_call)(const struct cpumask *, unsigned int);
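
/*
 * Handshake state for secondary bring-up: secondary_release holds the
 * number of the CPU currently being released (-1 when none), and
 * secondary_thread_info points at the idle task's thread_info for that
 * CPU.  Both are presumably consumed by the secondary's early startup
 * code before it enters secondary_start_kernel().
 */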
unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);
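
/*
 * Release one secondary: publish its CPU number in secondary_release,
 * then kick it with an IPI.  The secondary is expected to spin in its
 * early startup code until secondary_release matches its own CPU id;
 * that spin loop lives in the platform startup code, not in this file.
 */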
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one.
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * Now the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	spin_unlock(&boot_lock);
}

void __init smp_prepare_boot_cpu(void)
{
}
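
/*
 * No CPU enumeration is done here, so simply mark every CPU up to
 * NR_CPUS as possible; smp_prepare_cpus() below then limits the
 * present mask to max_cpus.
 */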
void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}
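
/*
 * Completed by the secondary in secondary_start_kernel() once it has
 * finished its own setup; __cpu_up() waits on it with a one second
 * timeout before declaring the bring-up failed.
 */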
static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}
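
/*
 * C entry point for a secondary CPU, reached once its early startup
 * code is done.  It runs on the init_mm page tables installed by
 * __cpu_up() above and signals cpu_running when the boot CPU may
 * safely continue.
 */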
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();

	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	atomic_inc(&mm->mm_count);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	preempt_disable();
	/*
	 * OK, it's off to the idle thread for us.
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}
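
/*
 * IPI demultiplexer, intended to be called by the platform interrupt
 * handling code when a cross-call interrupt arrives.  IPI_WAKEUP needs
 * no action here: its only job is to kick a parked secondary out of its
 * boot spin.
 */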
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}
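
/*
 * Park the calling CPU on behalf of smp_send_stop(): mark it offline,
 * mask interrupts and, where the CPU implements the power management
 * unit, drop into doze mode.
 */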
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze: available only when the power management unit is present */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}
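
/*
 * Registered once during boot, normally by the IPI-capable interrupt
 * controller driver; until this runs, __cpu_up() refuses to start
 * secondaries.
 */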
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}
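
/*
 * Usage sketch (hypothetical driver code, not part of this file): an
 * interrupt controller driver would register its IPI send routine from
 * its init path, e.g.
 *
 *	static void my_raise_ipi(const struct cpumask *mask,
 *				 unsigned int ipi_msg) { ... }
 *	...
 *	set_smp_cross_call(my_raise_ipi);
 */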

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - Performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

/*
 * FIXME: implement proper functionality instead of flush_tlb_all.
 * *But*, as things currently stand, the local_flush_tlb_* functions will
 * all boil down to local_flush_tlb_all anyway.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

/* Instruction cache invalidate - performed on each cpu */
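/*
 * The icache is local to each CPU, so after code in a page changes each
 * core has to invalidate its own copy; the cross-call below runs the
 * local invalidate on every online CPU.
 */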
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);