/* arch/powerpc/kvm/book3s.c */

  1. /*
  2. * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
  3. *
  4. * Authors:
  5. * Alexander Graf <agraf@suse.de>
  6. * Kevin Wolf <mail@kevin-wolf.de>
  7. *
  8. * Description:
  9. * This file is derived from arch/powerpc/kvm/44x.c,
  10. * by Hollis Blanchard <hollisb@us.ibm.com>.
  11. *
  12. * This program is free software; you can redistribute it and/or modify
  13. * it under the terms of the GNU General Public License, version 2, as
  14. * published by the Free Software Foundation.
  15. */
  16. #include <linux/kvm_host.h>
  17. #include <linux/err.h>
  18. #include <linux/export.h>
  19. #include <linux/slab.h>
  20. #include <linux/module.h>
  21. #include <linux/miscdevice.h>
  22. #include <asm/reg.h>
  23. #include <asm/cputable.h>
  24. #include <asm/cacheflush.h>
  25. #include <asm/tlbflush.h>
  26. #include <asm/uaccess.h>
  27. #include <asm/io.h>
  28. #include <asm/kvm_ppc.h>
  29. #include <asm/kvm_book3s.h>
  30. #include <asm/mmu_context.h>
  31. #include <asm/page.h>
  32. #include <linux/gfp.h>
  33. #include <linux/sched.h>
  34. #include <linux/vmalloc.h>
  35. #include <linux/highmem.h>
  36. #include "book3s.h"
  37. #include "trace.h"
  38. #define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU
  39. /* #define EXIT_DEBUG */
  40. struct kvm_stats_debugfs_item debugfs_entries[] = {
  41. { "exits", VCPU_STAT(sum_exits) },
  42. { "mmio", VCPU_STAT(mmio_exits) },
  43. { "sig", VCPU_STAT(signal_exits) },
  44. { "sysc", VCPU_STAT(syscall_exits) },
  45. { "inst_emu", VCPU_STAT(emulated_inst_exits) },
  46. { "dec", VCPU_STAT(dec_exits) },
  47. { "ext_intr", VCPU_STAT(ext_intr_exits) },
  48. { "queue_intr", VCPU_STAT(queue_intr) },
  49. { "halt_poll_success_ns", VCPU_STAT(halt_poll_success_ns) },
  50. { "halt_poll_fail_ns", VCPU_STAT(halt_poll_fail_ns) },
  51. { "halt_wait_ns", VCPU_STAT(halt_wait_ns) },
  52. { "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
  53. { "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), },
  54. { "halt_successful_wait", VCPU_STAT(halt_successful_wait) },
  55. { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
  56. { "halt_wakeup", VCPU_STAT(halt_wakeup) },
  57. { "pf_storage", VCPU_STAT(pf_storage) },
  58. { "sp_storage", VCPU_STAT(sp_storage) },
  59. { "pf_instruc", VCPU_STAT(pf_instruc) },
  60. { "sp_instruc", VCPU_STAT(sp_instruc) },
  61. { "ld", VCPU_STAT(ld) },
  62. { "ld_slow", VCPU_STAT(ld_slow) },
  63. { "st", VCPU_STAT(st) },
  64. { "st_slow", VCPU_STAT(st_slow) },
  65. { "pthru_all", VCPU_STAT(pthru_all) },
  66. { "pthru_host", VCPU_STAT(pthru_host) },
  67. { "pthru_bad_aff", VCPU_STAT(pthru_bad_aff) },
  68. { NULL }
  69. };
  70. void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
  71. {
  72. if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
  73. ulong pc = kvmppc_get_pc(vcpu);
  74. if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
  75. kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
  76. vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
  77. }
  78. }
  79. EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);
  80. static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
  81. {
  82. if (!is_kvmppc_hv_enabled(vcpu->kvm))
  83. return to_book3s(vcpu)->hior;
  84. return 0;
  85. }
  86. static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
  87. unsigned long pending_now, unsigned long old_pending)
  88. {
  89. if (is_kvmppc_hv_enabled(vcpu->kvm))
  90. return;
  91. if (pending_now)
  92. kvmppc_set_int_pending(vcpu, 1);
  93. else if (old_pending)
  94. kvmppc_set_int_pending(vcpu, 0);
  95. }
  96. static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
  97. {
  98. ulong crit_raw;
  99. ulong crit_r1;
  100. bool crit;
  101. if (is_kvmppc_hv_enabled(vcpu->kvm))
  102. return false;
  103. crit_raw = kvmppc_get_critical(vcpu);
  104. crit_r1 = kvmppc_get_gpr(vcpu, 1);
  105. /* Truncate crit indicators in 32 bit mode */
  106. if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
  107. crit_raw &= 0xffffffff;
  108. crit_r1 &= 0xffffffff;
  109. }
  110. /* Critical section when crit == r1 */
  111. crit = (crit_raw == crit_r1);
  112. /* ... and we're in supervisor mode */
  113. crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);
  114. return crit;
  115. }
  116. void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
  117. {
  118. kvmppc_unfixup_split_real(vcpu);
  119. kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
  120. kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
  121. kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
  122. vcpu->arch.mmu.reset_msr(vcpu);
  123. }
  124. static int kvmppc_book3s_vec2irqprio(unsigned int vec)
  125. {
  126. unsigned int prio;
  127. switch (vec) {
  128. case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET; break;
  129. case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
  130. case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE; break;
  131. case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT; break;
  132. case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE; break;
  133. case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT; break;
  134. case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL; break;
  135. case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL; break;
  136. case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT; break;
  137. case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM; break;
  138. case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL; break;
  139. case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER; break;
  140. case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL; break;
  141. case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG; break;
  142. case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC; break;
  143. case 0xf40: prio = BOOK3S_IRQPRIO_VSX; break;
  144. case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL; break;
  145. default: prio = BOOK3S_IRQPRIO_MAX; break;
  146. }
  147. return prio;
  148. }
  149. void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
  150. unsigned int vec)
  151. {
  152. unsigned long old_pending = vcpu->arch.pending_exceptions;
  153. clear_bit(kvmppc_book3s_vec2irqprio(vec),
  154. &vcpu->arch.pending_exceptions);
  155. kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
  156. old_pending);
  157. }
  158. void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
  159. {
  160. vcpu->stat.queue_intr++;
  161. set_bit(kvmppc_book3s_vec2irqprio(vec),
  162. &vcpu->arch.pending_exceptions);
  163. #ifdef EXIT_DEBUG
  164. printk(KERN_INFO "Queueing interrupt %x\n", vec);
  165. #endif
  166. }
  167. EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);
  168. void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
  169. {
  170. /* might as well deliver this straight away */
  171. kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
  172. }
  173. EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);
  174. void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
  175. {
  176. kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
  177. }
  178. EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);
  179. int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
  180. {
  181. return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
  182. }
  183. EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);
  184. void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
  185. {
  186. kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
  187. }
  188. EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);
  189. void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
  190. struct kvm_interrupt *irq)
  191. {
  192. unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;
  193. if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
  194. vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;
  195. kvmppc_book3s_queue_irqprio(vcpu, vec);
  196. }
  197. void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
  198. {
  199. kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
  200. kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
  201. }
  202. void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
  203. ulong flags)
  204. {
  205. kvmppc_set_dar(vcpu, dar);
  206. kvmppc_set_dsisr(vcpu, flags);
  207. kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
  208. }
  209. void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
  210. {
  211. u64 msr = kvmppc_get_msr(vcpu);
  212. msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
  213. msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
  214. kvmppc_set_msr_fast(vcpu, msr);
  215. kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
  216. }
  217. static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
  218. unsigned int priority)
  219. {
  220. int deliver = 1;
  221. int vec = 0;
  222. bool crit = kvmppc_critical_section(vcpu);
  223. switch (priority) {
  224. case BOOK3S_IRQPRIO_DECREMENTER:
  225. deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
  226. vec = BOOK3S_INTERRUPT_DECREMENTER;
  227. break;
  228. case BOOK3S_IRQPRIO_EXTERNAL:
  229. case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
  230. deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
  231. vec = BOOK3S_INTERRUPT_EXTERNAL;
  232. break;
  233. case BOOK3S_IRQPRIO_SYSTEM_RESET:
  234. vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
  235. break;
  236. case BOOK3S_IRQPRIO_MACHINE_CHECK:
  237. vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
  238. break;
  239. case BOOK3S_IRQPRIO_DATA_STORAGE:
  240. vec = BOOK3S_INTERRUPT_DATA_STORAGE;
  241. break;
  242. case BOOK3S_IRQPRIO_INST_STORAGE:
  243. vec = BOOK3S_INTERRUPT_INST_STORAGE;
  244. break;
  245. case BOOK3S_IRQPRIO_DATA_SEGMENT:
  246. vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
  247. break;
  248. case BOOK3S_IRQPRIO_INST_SEGMENT:
  249. vec = BOOK3S_INTERRUPT_INST_SEGMENT;
  250. break;
  251. case BOOK3S_IRQPRIO_ALIGNMENT:
  252. vec = BOOK3S_INTERRUPT_ALIGNMENT;
  253. break;
  254. case BOOK3S_IRQPRIO_PROGRAM:
  255. vec = BOOK3S_INTERRUPT_PROGRAM;
  256. break;
  257. case BOOK3S_IRQPRIO_VSX:
  258. vec = BOOK3S_INTERRUPT_VSX;
  259. break;
  260. case BOOK3S_IRQPRIO_ALTIVEC:
  261. vec = BOOK3S_INTERRUPT_ALTIVEC;
  262. break;
  263. case BOOK3S_IRQPRIO_FP_UNAVAIL:
  264. vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
  265. break;
  266. case BOOK3S_IRQPRIO_SYSCALL:
  267. vec = BOOK3S_INTERRUPT_SYSCALL;
  268. break;
  269. case BOOK3S_IRQPRIO_DEBUG:
  270. vec = BOOK3S_INTERRUPT_TRACE;
  271. break;
  272. case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
  273. vec = BOOK3S_INTERRUPT_PERFMON;
  274. break;
  275. case BOOK3S_IRQPRIO_FAC_UNAVAIL:
  276. vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
  277. break;
  278. default:
  279. deliver = 0;
  280. printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
  281. break;
  282. }
  283. #if 0
  284. printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
  285. #endif
  286. if (deliver)
  287. kvmppc_inject_interrupt(vcpu, vec, 0);
  288. return deliver;
  289. }
  290. /*
  291. * This function determines if an irqprio should be cleared once issued.
  292. */
  293. static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
  294. {
  295. switch (priority) {
  296. case BOOK3S_IRQPRIO_DECREMENTER:
  297. /* DEC interrupts get cleared by mtdec */
  298. return false;
  299. case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
  300. /* External interrupts get cleared by userspace */
  301. return false;
  302. }
  303. return true;
  304. }
  305. int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
  306. {
  307. unsigned long *pending = &vcpu->arch.pending_exceptions;
  308. unsigned long old_pending = vcpu->arch.pending_exceptions;
  309. unsigned int priority;
  310. #ifdef EXIT_DEBUG
  311. if (vcpu->arch.pending_exceptions)
  312. printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
  313. #endif
  314. priority = __ffs(*pending);
  315. while (priority < BOOK3S_IRQPRIO_MAX) {
  316. if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
  317. clear_irqprio(vcpu, priority)) {
  318. clear_bit(priority, &vcpu->arch.pending_exceptions);
  319. break;
  320. }
  321. priority = find_next_bit(pending,
  322. BITS_PER_BYTE * sizeof(*pending),
  323. priority + 1);
  324. }
  325. /* Tell the guest about our interrupt status */
  326. kvmppc_update_int_pending(vcpu, *pending, old_pending);
  327. return 0;
  328. }
  329. EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
  330. kvm_pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
  331. bool *writable)
  332. {
  333. ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
  334. gfn_t gfn = gpa >> PAGE_SHIFT;
  335. if (!(kvmppc_get_msr(vcpu) & MSR_SF))
  336. mp_pa = (uint32_t)mp_pa;
  337. /* Magic page override */
  338. gpa &= ~0xFFFULL;
  339. if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
  340. ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
  341. kvm_pfn_t pfn;
  342. pfn = (kvm_pfn_t)virt_to_phys((void*)shared_page) >> PAGE_SHIFT;
  343. get_page(pfn_to_page(pfn));
  344. if (writable)
  345. *writable = true;
  346. return pfn;
  347. }
  348. return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
  349. }
  350. EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
  351. int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
  352. enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
  353. {
  354. bool data = (xlid == XLATE_DATA);
  355. bool iswrite = (xlrw == XLATE_WRITE);
  356. int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
  357. int r;
  358. if (relocated) {
  359. r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
  360. } else {
  361. pte->eaddr = eaddr;
  362. pte->raddr = eaddr & KVM_PAM;
  363. pte->vpage = VSID_REAL | eaddr >> 12;
  364. pte->may_read = true;
  365. pte->may_write = true;
  366. pte->may_execute = true;
  367. r = 0;
  368. if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
  369. !data) {
  370. if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
  371. ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
  372. pte->raddr &= ~SPLIT_HACK_MASK;
  373. }
  374. }
  375. return r;
  376. }
  377. int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
  378. u32 *inst)
  379. {
  380. ulong pc = kvmppc_get_pc(vcpu);
  381. int r;
  382. if (type == INST_SC)
  383. pc -= 4;
  384. r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
  385. if (r == EMULATE_DONE)
  386. return r;
  387. else
  388. return EMULATE_AGAIN;
  389. }
  390. EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);
  391. int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
  392. {
  393. return 0;
  394. }
  395. int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
  396. {
  397. return 0;
  398. }
  399. void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
  400. {
  401. }
  402. int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
  403. struct kvm_sregs *sregs)
  404. {
  405. return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
  406. }
  407. int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
  408. struct kvm_sregs *sregs)
  409. {
  410. return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
  411. }
  412. int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  413. {
  414. int i;
  415. regs->pc = kvmppc_get_pc(vcpu);
  416. regs->cr = kvmppc_get_cr(vcpu);
  417. regs->ctr = kvmppc_get_ctr(vcpu);
  418. regs->lr = kvmppc_get_lr(vcpu);
  419. regs->xer = kvmppc_get_xer(vcpu);
  420. regs->msr = kvmppc_get_msr(vcpu);
  421. regs->srr0 = kvmppc_get_srr0(vcpu);
  422. regs->srr1 = kvmppc_get_srr1(vcpu);
  423. regs->pid = vcpu->arch.pid;
  424. regs->sprg0 = kvmppc_get_sprg0(vcpu);
  425. regs->sprg1 = kvmppc_get_sprg1(vcpu);
  426. regs->sprg2 = kvmppc_get_sprg2(vcpu);
  427. regs->sprg3 = kvmppc_get_sprg3(vcpu);
  428. regs->sprg4 = kvmppc_get_sprg4(vcpu);
  429. regs->sprg5 = kvmppc_get_sprg5(vcpu);
  430. regs->sprg6 = kvmppc_get_sprg6(vcpu);
  431. regs->sprg7 = kvmppc_get_sprg7(vcpu);
  432. for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
  433. regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
  434. return 0;
  435. }
  436. int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
  437. {
  438. int i;
  439. kvmppc_set_pc(vcpu, regs->pc);
  440. kvmppc_set_cr(vcpu, regs->cr);
  441. kvmppc_set_ctr(vcpu, regs->ctr);
  442. kvmppc_set_lr(vcpu, regs->lr);
  443. kvmppc_set_xer(vcpu, regs->xer);
  444. kvmppc_set_msr(vcpu, regs->msr);
  445. kvmppc_set_srr0(vcpu, regs->srr0);
  446. kvmppc_set_srr1(vcpu, regs->srr1);
  447. kvmppc_set_sprg0(vcpu, regs->sprg0);
  448. kvmppc_set_sprg1(vcpu, regs->sprg1);
  449. kvmppc_set_sprg2(vcpu, regs->sprg2);
  450. kvmppc_set_sprg3(vcpu, regs->sprg3);
  451. kvmppc_set_sprg4(vcpu, regs->sprg4);
  452. kvmppc_set_sprg5(vcpu, regs->sprg5);
  453. kvmppc_set_sprg6(vcpu, regs->sprg6);
  454. kvmppc_set_sprg7(vcpu, regs->sprg7);
  455. for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
  456. kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
  457. return 0;
  458. }
  459. int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  460. {
  461. return -ENOTSUPP;
  462. }
  463. int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
  464. {
  465. return -ENOTSUPP;
  466. }
  467. int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
  468. union kvmppc_one_reg *val)
  469. {
  470. int r = 0;
  471. long int i;
  472. r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
  473. if (r == -EINVAL) {
  474. r = 0;
  475. switch (id) {
  476. case KVM_REG_PPC_DAR:
  477. *val = get_reg_val(id, kvmppc_get_dar(vcpu));
  478. break;
  479. case KVM_REG_PPC_DSISR:
  480. *val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
  481. break;
  482. case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
  483. i = id - KVM_REG_PPC_FPR0;
  484. *val = get_reg_val(id, VCPU_FPR(vcpu, i));
  485. break;
  486. case KVM_REG_PPC_FPSCR:
  487. *val = get_reg_val(id, vcpu->arch.fp.fpscr);
  488. break;
  489. #ifdef CONFIG_VSX
  490. case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
  491. if (cpu_has_feature(CPU_FTR_VSX)) {
  492. i = id - KVM_REG_PPC_VSR0;
  493. val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
  494. val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
  495. } else {
  496. r = -ENXIO;
  497. }
  498. break;
  499. #endif /* CONFIG_VSX */
  500. case KVM_REG_PPC_DEBUG_INST:
  501. *val = get_reg_val(id, INS_TW);
  502. break;
  503. #ifdef CONFIG_KVM_XICS
  504. case KVM_REG_PPC_ICP_STATE:
  505. if (!vcpu->arch.icp) {
  506. r = -ENXIO;
  507. break;
  508. }
  509. *val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
  510. break;
  511. #endif /* CONFIG_KVM_XICS */
  512. case KVM_REG_PPC_FSCR:
  513. *val = get_reg_val(id, vcpu->arch.fscr);
  514. break;
  515. case KVM_REG_PPC_TAR:
  516. *val = get_reg_val(id, vcpu->arch.tar);
  517. break;
  518. case KVM_REG_PPC_EBBHR:
  519. *val = get_reg_val(id, vcpu->arch.ebbhr);
  520. break;
  521. case KVM_REG_PPC_EBBRR:
  522. *val = get_reg_val(id, vcpu->arch.ebbrr);
  523. break;
  524. case KVM_REG_PPC_BESCR:
  525. *val = get_reg_val(id, vcpu->arch.bescr);
  526. break;
  527. case KVM_REG_PPC_IC:
  528. *val = get_reg_val(id, vcpu->arch.ic);
  529. break;
  530. default:
  531. r = -EINVAL;
  532. break;
  533. }
  534. }
  535. return r;
  536. }
  537. int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
  538. union kvmppc_one_reg *val)
  539. {
  540. int r = 0;
  541. long int i;
  542. r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
  543. if (r == -EINVAL) {
  544. r = 0;
  545. switch (id) {
  546. case KVM_REG_PPC_DAR:
  547. kvmppc_set_dar(vcpu, set_reg_val(id, *val));
  548. break;
  549. case KVM_REG_PPC_DSISR:
  550. kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
  551. break;
  552. case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
  553. i = id - KVM_REG_PPC_FPR0;
  554. VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
  555. break;
  556. case KVM_REG_PPC_FPSCR:
  557. vcpu->arch.fp.fpscr = set_reg_val(id, *val);
  558. break;
  559. #ifdef CONFIG_VSX
  560. case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
  561. if (cpu_has_feature(CPU_FTR_VSX)) {
  562. i = id - KVM_REG_PPC_VSR0;
  563. vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
  564. vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
  565. } else {
  566. r = -ENXIO;
  567. }
  568. break;
  569. #endif /* CONFIG_VSX */
  570. #ifdef CONFIG_KVM_XICS
  571. case KVM_REG_PPC_ICP_STATE:
  572. if (!vcpu->arch.icp) {
  573. r = -ENXIO;
  574. break;
  575. }
  576. r = kvmppc_xics_set_icp(vcpu,
  577. set_reg_val(id, *val));
  578. break;
  579. #endif /* CONFIG_KVM_XICS */
  580. case KVM_REG_PPC_FSCR:
  581. vcpu->arch.fscr = set_reg_val(id, *val);
  582. break;
  583. case KVM_REG_PPC_TAR:
  584. vcpu->arch.tar = set_reg_val(id, *val);
  585. break;
  586. case KVM_REG_PPC_EBBHR:
  587. vcpu->arch.ebbhr = set_reg_val(id, *val);
  588. break;
  589. case KVM_REG_PPC_EBBRR:
  590. vcpu->arch.ebbrr = set_reg_val(id, *val);
  591. break;
  592. case KVM_REG_PPC_BESCR:
  593. vcpu->arch.bescr = set_reg_val(id, *val);
  594. break;
  595. case KVM_REG_PPC_IC:
  596. vcpu->arch.ic = set_reg_val(id, *val);
  597. break;
  598. default:
  599. r = -EINVAL;
  600. break;
  601. }
  602. }
  603. return r;
  604. }
  605. void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
  606. {
  607. vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
  608. }
  609. void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
  610. {
  611. vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
  612. }
  613. void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
  614. {
  615. vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
  616. }
  617. EXPORT_SYMBOL_GPL(kvmppc_set_msr);
  618. int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
  619. {
  620. return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
  621. }
  622. int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
  623. struct kvm_translation *tr)
  624. {
  625. return 0;
  626. }
  627. int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
  628. struct kvm_guest_debug *dbg)
  629. {
  630. vcpu->guest_debug = dbg->control;
  631. return 0;
  632. }
  633. void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
  634. {
  635. kvmppc_core_queue_dec(vcpu);
  636. kvm_vcpu_kick(vcpu);
  637. }
  638. struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
  639. {
  640. return kvm->arch.kvm_ops->vcpu_create(kvm, id);
  641. }
  642. void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
  643. {
  644. vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
  645. }
  646. int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
  647. {
  648. return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
  649. }
  650. int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
  651. {
  652. return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
  653. }
  654. void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
  655. struct kvm_memory_slot *dont)
  656. {
  657. kvm->arch.kvm_ops->free_memslot(free, dont);
  658. }
  659. int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
  660. unsigned long npages)
  661. {
  662. return kvm->arch.kvm_ops->create_memslot(slot, npages);
  663. }
  664. void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
  665. {
  666. kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
  667. }
  668. int kvmppc_core_prepare_memory_region(struct kvm *kvm,
  669. struct kvm_memory_slot *memslot,
  670. const struct kvm_userspace_memory_region *mem)
  671. {
  672. return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
  673. }
  674. void kvmppc_core_commit_memory_region(struct kvm *kvm,
  675. const struct kvm_userspace_memory_region *mem,
  676. const struct kvm_memory_slot *old,
  677. const struct kvm_memory_slot *new)
  678. {
  679. kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
  680. }
  681. int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
  682. {
  683. return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
  684. }
  685. EXPORT_SYMBOL_GPL(kvm_unmap_hva);
  686. int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
  687. {
  688. return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
  689. }
  690. int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
  691. {
  692. return kvm->arch.kvm_ops->age_hva(kvm, start, end);
  693. }
  694. int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
  695. {
  696. return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
  697. }
  698. void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
  699. {
  700. kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
  701. }
  702. void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
  703. {
  704. vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
  705. }
  706. int kvmppc_core_init_vm(struct kvm *kvm)
  707. {
  708. #ifdef CONFIG_PPC64
  709. INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
  710. INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
  711. #endif
  712. return kvm->arch.kvm_ops->init_vm(kvm);
  713. }
  714. void kvmppc_core_destroy_vm(struct kvm *kvm)
  715. {
  716. kvm->arch.kvm_ops->destroy_vm(kvm);
  717. #ifdef CONFIG_PPC64
  718. kvmppc_rtas_tokens_free(kvm);
  719. WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
  720. #endif
  721. }
  722. int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
  723. {
  724. unsigned long size = kvmppc_get_gpr(vcpu, 4);
  725. unsigned long addr = kvmppc_get_gpr(vcpu, 5);
  726. u64 buf;
  727. int srcu_idx;
  728. int ret;
  729. if (!is_power_of_2(size) || (size > sizeof(buf)))
  730. return H_TOO_HARD;
  731. srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  732. ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
  733. srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
  734. if (ret != 0)
  735. return H_TOO_HARD;
  736. switch (size) {
  737. case 1:
  738. kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
  739. break;
  740. case 2:
  741. kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
  742. break;
  743. case 4:
  744. kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
  745. break;
  746. case 8:
  747. kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
  748. break;
  749. default:
  750. BUG();
  751. }
  752. return H_SUCCESS;
  753. }
  754. EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
  755. int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
  756. {
  757. unsigned long size = kvmppc_get_gpr(vcpu, 4);
  758. unsigned long addr = kvmppc_get_gpr(vcpu, 5);
  759. unsigned long val = kvmppc_get_gpr(vcpu, 6);
  760. u64 buf;
  761. int srcu_idx;
  762. int ret;
  763. switch (size) {
  764. case 1:
  765. *(u8 *)&buf = val;
  766. break;
  767. case 2:
  768. *(__be16 *)&buf = cpu_to_be16(val);
  769. break;
  770. case 4:
  771. *(__be32 *)&buf = cpu_to_be32(val);
  772. break;
  773. case 8:
  774. *(__be64 *)&buf = cpu_to_be64(val);
  775. break;
  776. default:
  777. return H_TOO_HARD;
  778. }
  779. srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
  780. ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
  781. srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
  782. if (ret != 0)
  783. return H_TOO_HARD;
  784. return H_SUCCESS;
  785. }
  786. EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);
  787. int kvmppc_core_check_processor_compat(void)
  788. {
  789. /*
  790. * We always return 0 for book3s. We check
  791. * for compatibility while loading the HV
  792. * or PR module
  793. */
  794. return 0;
  795. }
  796. int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
  797. {
  798. return kvm->arch.kvm_ops->hcall_implemented(hcall);
  799. }
  800. static int kvmppc_book3s_init(void)
  801. {
  802. int r;
  803. r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
  804. if (r)
  805. return r;
  806. #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
  807. r = kvmppc_book3s_init_pr();
  808. #endif
  809. return r;
  810. }
  811. static void kvmppc_book3s_exit(void)
  812. {
  813. #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
  814. kvmppc_book3s_exit_pr();
  815. #endif
  816. kvm_exit();
  817. }
  818. module_init(kvmppc_book3s_init);
  819. module_exit(kvmppc_book3s_exit);
  820. /* On 32bit this is our one and only kernel module */
  821. #ifdef CONFIG_KVM_BOOK3S_32_HANDLER
  822. MODULE_ALIAS_MISCDEV(KVM_MINOR);
  823. MODULE_ALIAS("devname:kvm");
  824. #endif