// SPDX-License-Identifier: GPL-2.0
/*
 * Xen SMP support
 *
 * This file implements the Xen versions of smp_ops. SMP under Xen is
 * very straightforward. Bringing a CPU up is simply a matter of
 * loading its initial context and setting it running.
 *
 * IPIs are handled through the Xen event mechanism.
 *
 * Because virtual CPUs can be scheduled onto any real CPU, there's no
 * useful topology information for the kernel to make use of. As a
 * result, all CPUs are treated as if they're single-core and
 * single-threaded.
 */
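
/*
 * Illustrative sketch (not part of this file): under Xen, an IPI is an
 * event channel bound per CPU rather than an APIC vector. Binding a
 * handler looks roughly like the call below; the handler name is
 * hypothetical, while bind_ipi_to_irqhandler() is the real helper used
 * later in this file:
 *
 *        irq = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, cpu,
 *                                     my_call_function_handler,
 *                                     IRQF_PERCPU | IRQF_NOBALANCING,
 *                                     "callfunc", NULL);
 */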

#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/irq_work.h>
#include <linux/tick.h>
#include <linux/nmi.h>
#include <linux/cpuhotplug.h>

#include <asm/paravirt.h>
#include <asm/desc.h>
#include <asm/pgtable.h>
#include <asm/cpu.h>

#include <xen/interface/xen.h>
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>

#include <asm/spec-ctrl.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>

#include <xen/xen.h>
#include <xen/page.h>
#include <xen/events.h>

#include <xen/hvc-console.h>
#include "xen-ops.h"
#include "mmu.h"
#include "smp.h"
#include "pmu.h"

cpumask_var_t xen_cpu_initialized_map;

static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 };
static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 };

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);

static void cpu_bringup(void)
{
        int cpu;

        cpu_init();
        touch_softlockup_watchdog();
        preempt_disable();

        /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */
        if (!xen_feature(XENFEAT_supervisor_mode_kernel)) {
                xen_enable_sysenter();
                xen_enable_syscall();
        }
        cpu = smp_processor_id();
        smp_store_cpu_info(cpu);
        cpu_data(cpu).x86_max_cores = 1;
        set_cpu_sibling_map(cpu);

        speculative_store_bypass_ht_init();

        xen_setup_cpu_clockevents();

        notify_cpu_starting(cpu);

        set_cpu_online(cpu, true);

        cpu_set_state_online(cpu); /* Implies full memory barrier. */

        /* We can take interrupts now: we're officially "up". */
        local_irq_enable();
}

asmlinkage __visible void cpu_bringup_and_idle(void)
{
        cpu_bringup();
        cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void xen_smp_intr_free_pv(unsigned int cpu)
{
        if (per_cpu(xen_irq_work, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL);
                per_cpu(xen_irq_work, cpu).irq = -1;
                kfree(per_cpu(xen_irq_work, cpu).name);
                per_cpu(xen_irq_work, cpu).name = NULL;
        }

        if (per_cpu(xen_pmu_irq, cpu).irq >= 0) {
                unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL);
                per_cpu(xen_pmu_irq, cpu).irq = -1;
                kfree(per_cpu(xen_pmu_irq, cpu).name);
                per_cpu(xen_pmu_irq, cpu).name = NULL;
        }
}

int xen_smp_intr_init_pv(unsigned int cpu)
{
        int rc;
        char *callfunc_name, *pmu_name;

        callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
        rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
                                    cpu,
                                    xen_irq_work_interrupt,
                                    IRQF_PERCPU|IRQF_NOBALANCING,
                                    callfunc_name,
                                    NULL);
        if (rc < 0)
                goto fail;
        per_cpu(xen_irq_work, cpu).irq = rc;
        per_cpu(xen_irq_work, cpu).name = callfunc_name;

        if (is_xen_pmu(cpu)) {
                pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu);
                rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu,
                                             xen_pmu_irq_handler,
                                             IRQF_PERCPU|IRQF_NOBALANCING,
                                             pmu_name, NULL);
                if (rc < 0)
                        goto fail;
                per_cpu(xen_pmu_irq, cpu).irq = rc;
                per_cpu(xen_pmu_irq, cpu).name = pmu_name;
        }

        return 0;

fail:
        xen_smp_intr_free_pv(cpu);
        return rc;
}
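
/*
 * Usage sketch (illustrative; callers are not quoted verbatim): the
 * init/free pair above brackets a vCPU's hotplug lifetime, e.g.
 *
 *        rc = xen_smp_intr_init_pv(cpu);
 *        if (rc < 0)
 *                return rc;
 *        ...
 *        xen_smp_intr_free_pv(cpu);
 *
 * On any binding failure, xen_smp_intr_init_pv() unbinds whatever was
 * already set up, so a failed init needs no separate cleanup.
 */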

static void __init xen_fill_possible_map(void)
{
        int i, rc;

        if (xen_initial_domain())
                return;

        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                }
        }
}

static void __init xen_filter_cpu_maps(void)
{
        int i, rc;
        unsigned int subtract = 0;

        if (!xen_initial_domain())
                return;

        num_processors = 0;
        disabled_cpus = 0;
        for (i = 0; i < nr_cpu_ids; i++) {
                rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
                if (rc >= 0) {
                        num_processors++;
                        set_cpu_possible(i, true);
                } else {
                        set_cpu_possible(i, false);
                        set_cpu_present(i, false);
                        subtract++;
                }
        }
#ifdef CONFIG_HOTPLUG_CPU
        /*
         * This is akin to using 'nr_cpus' on the Linux command line,
         * which is OK: with 'dom0_max_vcpus=X' we can only have up to
         * X vCPUs, while nr_cpu_ids may be greater than X. Normally
         * that is not a problem, except when CPU hotplugging is
         * involved: then there might appear to be more than X CPUs in
         * the guest, which cannot work since there is no hypercall to
         * expand the maximum number of vCPUs of an already running
         * guest. So cap nr_cpu_ids to X.
         */
        if (subtract)
                nr_cpu_ids = nr_cpu_ids - subtract;
#endif
}
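
/*
 * Worked example (illustrative numbers): booting dom0 with
 * dom0_max_vcpus=2 on a host where nr_cpu_ids is 8, VCPUOP_is_up
 * succeeds for vCPUs 0-1 and fails for 2-7, so subtract ends up as 6
 * and nr_cpu_ids is capped from 8 down to 2.
 */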

static void __init xen_pv_smp_prepare_boot_cpu(void)
{
        BUG_ON(smp_processor_id() != 0);
        native_smp_prepare_boot_cpu();

        if (!xen_feature(XENFEAT_writable_page_tables))
                /*
                 * We've switched to the "real" per-cpu gdt, so make
                 * sure the old memory can be recycled.
                 */
                make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
        /*
         * Xen starts us with XEN_FLAT_RING1_DS, but linux code
         * expects __USER_DS
         */
        loadsegment(ds, __USER_DS);
        loadsegment(es, __USER_DS);
#endif

        xen_filter_cpu_maps();
        xen_setup_vcpu_info_placement();

        /*
         * The alternative logic (which patches the unlock/lock) runs
         * before the SMP bringup code is activated. Hence we need to
         * set this up before the core kernel is patched; otherwise
         * only modules would end up patched, not the core code.
         */
        xen_init_spinlocks();
}

static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
{
        unsigned cpu;
        unsigned int i;

        if (skip_ioapic_setup) {
                char *m = (max_cpus == 0) ?
                        "The nosmp parameter is incompatible with Xen; " \
                        "use Xen dom0_max_vcpus=1 parameter" :
                        "The noapic parameter is incompatible with Xen";

                xen_raw_printk(m);
                panic(m);
        }
        xen_init_lock_cpu(0);

        smp_store_boot_cpu_info();
        cpu_data(0).x86_max_cores = 1;

        for_each_possible_cpu(i) {
                zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
                zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
        }
        set_cpu_sibling_map(0);

        speculative_store_bypass_ht_init();

        xen_pmu_init(0);

        if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
                BUG();

        if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
                panic("could not allocate xen_cpu_initialized_map\n");

        cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

        /* Restrict the possible_map according to max_cpus. */
        while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
                for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
                        continue;
                set_cpu_possible(cpu, false);
        }

        for_each_possible_cpu(cpu)
                set_cpu_present(cpu, true);
}
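
/*
 * Worked example (illustrative): with CPUs 0-3 possible and max_cpus=2,
 * the restriction loop above clears the highest possible CPU on each
 * pass (3, then 2) until num_possible_cpus() == 2, leaving CPUs 0-1
 * possible and then marking them present.
 */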

static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
        struct vcpu_guest_context *ctxt;
        struct desc_struct *gdt;
        unsigned long gdt_mfn;

        /* used to tell cpu_init() that it can proceed with initialization */
        cpumask_set_cpu(cpu, cpu_callout_mask);
        if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
                return 0;

        ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
        if (ctxt == NULL)
                return -ENOMEM;

        gdt = get_cpu_gdt_rw(cpu);

#ifdef CONFIG_X86_32
        ctxt->user_regs.fs = __KERNEL_PERCPU;
        ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
        memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

        /*
         * Bring up the CPU in cpu_bringup_and_idle() with the stack
         * pointing just below where pt_regs would be if it were a normal
         * kernel entry.
         */
        ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
        ctxt->flags = VGCF_IN_KERNEL;
        ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
        ctxt->user_regs.ds = __USER_DS;
        ctxt->user_regs.es = __USER_DS;
        ctxt->user_regs.ss = __KERNEL_DS;
        ctxt->user_regs.cs = __KERNEL_CS;
        ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle);

        xen_copy_trap_info(ctxt->trap_ctxt);

        ctxt->ldt_ents = 0;

        BUG_ON((unsigned long)gdt & ~PAGE_MASK);

        gdt_mfn = arbitrary_virt_to_mfn(gdt);
        make_lowmem_page_readonly(gdt);
        make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

        ctxt->gdt_frames[0] = gdt_mfn;
        ctxt->gdt_ents      = GDT_ENTRIES;

        /*
         * Set SS:SP that Xen will use when entering guest kernel mode
         * from guest user mode. Subsequent calls to load_sp0() can
         * change this value.
         */
        ctxt->kernel_ss = __KERNEL_DS;
        ctxt->kernel_sp = task_top_of_stack(idle);

#ifdef CONFIG_X86_32
        ctxt->event_callback_cs    = __KERNEL_CS;
        ctxt->failsafe_callback_cs = __KERNEL_CS;
#else
        ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
        ctxt->event_callback_eip    =
                (unsigned long)xen_hypervisor_callback;
        ctxt->failsafe_callback_eip =
                (unsigned long)xen_failsafe_callback;

        per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
        ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));

        if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
                BUG();

        kfree(ctxt);
        return 0;
}
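
/*
 * Note (illustrative): Xen PV requires descriptor-table frames to be
 * mapped read-only before VCPUOP_initialise will accept them, which is
 * why the GDT page is downgraded above. Handing that page back to
 * general use would first have to undo the downgrade, roughly:
 *
 *        make_lowmem_page_readwrite(gdt);
 */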

static int xen_pv_cpu_up(unsigned int cpu, struct task_struct *idle)
{
        int rc;

        common_cpu_up(cpu, idle);

        xen_setup_runstate_info(cpu);

        /*
         * PV VCPUs are always successfully taken down (see 'while' loop
         * in xen_pv_cpu_die()), so -EBUSY is an error.
         */
        rc = cpu_check_up_prepare(cpu);
        if (rc)
                return rc;

        /* make sure interrupts start blocked */
        per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

        rc = cpu_initialize_context(cpu, idle);
        if (rc)
                return rc;

        xen_pmu_init(cpu);

        rc = HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
        BUG_ON(rc);

        while (cpu_report_state(cpu) != CPU_ONLINE)
                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);

        return 0;
}
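
/*
 * Bring-up protocol sketch (condensing xen_pv_cpu_up() above; error
 * handling elided):
 *
 *        cpu_initialize_context(cpu, idle);          // VCPUOP_initialise
 *        HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL);
 *        while (cpu_report_state(cpu) != CPU_ONLINE) // wait for the AP
 *                HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
 *
 * The new vCPU starts executing at cpu_bringup_and_idle(), which flips
 * its state to CPU_ONLINE via cpu_set_state_online().
 */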

#ifdef CONFIG_HOTPLUG_CPU
static int xen_pv_cpu_disable(void)
{
        unsigned int cpu = smp_processor_id();
        if (cpu == 0)
                return -EBUSY;

        cpu_disable_common();

        load_cr3(swapper_pg_dir);
        return 0;
}

static void xen_pv_cpu_die(unsigned int cpu)
{
        while (HYPERVISOR_vcpu_op(VCPUOP_is_up,
                                  xen_vcpu_nr(cpu), NULL)) {
                __set_current_state(TASK_UNINTERRUPTIBLE);
                schedule_timeout(HZ/10);
        }

        if (common_cpu_die(cpu) == 0) {
                xen_smp_intr_free(cpu);
                xen_uninit_lock_cpu(cpu);
                xen_teardown_timer(cpu);
                xen_pmu_finish(cpu);
        }
}

static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
{
        play_dead_common();
        HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
        cpu_bringup();
        /*
         * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
         * clears certain data that the cpu_idle loop (which called us
         * and that we return from) expects. The only way to get that
         * data back is to call:
         */
        tick_nohz_idle_enter();
        tick_nohz_idle_stop_tick_protected();

        cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE);
}

#else /* !CONFIG_HOTPLUG_CPU */
static int xen_pv_cpu_disable(void)
{
        return -ENOSYS;
}

static void xen_pv_cpu_die(unsigned int cpu)
{
        BUG();
}

static void xen_pv_play_dead(void)
{
        BUG();
}
#endif

static void stop_self(void *v)
{
        int cpu = smp_processor_id();

        /* make sure we're not pinning something down */
        load_cr3(swapper_pg_dir);
        /* should set up a minimal gdt */

        set_cpu_online(cpu, false);

        HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL);
        BUG();
}

static void xen_pv_stop_other_cpus(int wait)
{
        smp_call_function(stop_self, NULL, wait);
}

static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
{
        irq_enter();
        irq_work_run();
        inc_irq_stat(apic_irq_work_irqs);
        irq_exit();

        return IRQ_HANDLED;
}
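
/*
 * Producer sketch (illustrative): work reaches xen_irq_work_interrupt()
 * when some CPU queues an irq_work item and the generic irq_work code
 * raises XEN_IRQ_WORK_VECTOR. The callback name below is hypothetical:
 *
 *        static void my_work_func(struct irq_work *work) { ... }
 *        static struct irq_work my_work;
 *
 *        init_irq_work(&my_work, my_work_func);
 *        irq_work_queue(&my_work);
 */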

static const struct smp_ops xen_smp_ops __initconst = {
        .smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu,
        .smp_prepare_cpus = xen_pv_smp_prepare_cpus,
        .smp_cpus_done = xen_smp_cpus_done,

        .cpu_up = xen_pv_cpu_up,
        .cpu_die = xen_pv_cpu_die,
        .cpu_disable = xen_pv_cpu_disable,
        .play_dead = xen_pv_play_dead,

        .stop_other_cpus = xen_pv_stop_other_cpus,
        .smp_send_reschedule = xen_smp_send_reschedule,

        .send_call_func_ipi = xen_smp_send_call_function_ipi,
        .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi,
};

void __init xen_smp_init(void)
{
        smp_ops = xen_smp_ops;
        xen_fill_possible_map();
}