trap_emul.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vmalloc.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

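/*
 * Translate a guest virtual address to a guest physical address. Under
 * trap-and-emulate only the unmapped KSeg0/KSeg1 segments translate
 * directly (by masking off the segment bits); any other GVA is an error
 * here, since mapped addresses go through the TLB fault paths instead.
 */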
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	gva_t kseg = KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}

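/*
 * Coprocessor Unusable exception. CE == 1 means the guest touched the FPU:
 * either deliver a COP1 Unusable exception to the guest (no FPU, or guest
 * Status.CU1 clear) or restore the real FPU context with kvm_own_fpu().
 * Any other coprocessor-unusable fault (in practice CP0, as the guest
 * kernel runs unprivileged) is fed to the instruction emulator.
 */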
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

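/*
 * TLB Modified exception: the guest wrote to a page whose TLB entry is
 * valid but not dirty. Faults on guest user or KSEG23 addresses are
 * handled via the guest TLB (forwarded to the guest or used to refill
 * the shadow host TLB); a modify fault on guest KSEG0 is unexpected and
 * terminates the VM with an internal error.
 */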
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * XXXKYMA: The guest kernel does not expect to get this fault
		 * when we are not using HIGHMEM. Need to address this in a
		 * HIGHMEM kernel
		 */
		kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		kvm_err("Illegal TLB Mod fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

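/*
 * Common handler for TLB load and store misses, classified by the
 * faulting address: commpage accesses from guest kernel mode, guest
 * user/KSEG23 addresses (refill from the guest TLB or forward the fault
 * to the guest), guest KSEG0 (always handled by KVM), and host
 * KSeg0/KSeg1 addresses in guest kernel mode, which indicate MMIO.
 */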
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * A user address (UA) fault can happen if:
		 * (1) the TLB entry is not present/valid in both the guest
		 *     and the shadow host TLBs; in this case we pass the
		 *     fault on to the guest kernel and let it handle it.
		 * (2) the TLB entry is present in the guest TLB but not in
		 *     the shadow host TLB; in this case we inject the entry
		 *     from the guest TLB into the shadow host TLB.
		 */
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel
		 * does not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		kvm_debug("Emulate %s MMIO space\n",
			  store ? "Store to" : "Load from");

		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate %s MMIO space failed\n",
				store ? "Store to" : "Load from");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

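/*
 * Address Error exceptions. A store to (or load from) host KSeg0/KSeg1
 * is an MMIO access: emulate the faulting instruction and hand the
 * access to userland via KVM_EXIT_MMIO. Anything else is a genuine
 * error and exits with an internal error.
 */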
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		kvm_debug("Emulate Store to MMIO space\n");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Store to MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Load from MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		er = EMULATE_FAIL;
	}
	return ret;
}

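/*
 * The handlers below all follow the same pattern: re-deliver the trapping
 * exception (syscall, reserved instruction, breakpoint, trap, MSA FP, FP)
 * to the guest via the corresponding emulation helper, and exit to
 * userland with an internal error if emulation fails.
 */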
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *)vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

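/*
 * VM-wide and per-VCPU initialisation hooks. Trap-and-emulate keeps no
 * VM-wide state. Per VCPU, kscratch_enabled is a bitmask of the CP0
 * KScratch registers available to the guest (0xfc selects 2-7 here).
 */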
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.kscratch_enabled = 0xfc;

	return 0;
}

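/*
 * Initialise the guest-visible CP0 state so the guest comes up on what
 * looks like a plausible MIPS core: a 24Kc-like PRId (or a generic QEMU
 * machine on R6), Config registers derived from the host where they must
 * match (endianness, architecture revision, cache geometry), and a guest
 * TLB of KVM_MIPS_GUEST_TLB_SIZE entries.
 */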
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif

	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	return 0;
}

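/*
 * Trap-and-emulate contributes no extra registers to the register list
 * ioctl beyond the common MIPS set, so num_regs is zero and there are no
 * indices to copy out.
 */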
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	return 0;
}

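/*
 * KVM_GET_ONE_REG backend for the registers that need special handling
 * here: CP0 Count and the count control/resume/frequency
 * pseudo-registers are derived from the virtualised timer rather than
 * read from saved state.
 */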
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

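/*
 * KVM_SET_ONE_REG backend. Timer-related registers go through the
 * kvm_mips_write_*()/kvm_mips_set_*() helpers so the virtualised timer
 * stays consistent, and writes to the Config registers are filtered
 * through per-register writable-bit masks so userland cannot set bits
 * the emulation does not support.
 */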
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}

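/*
 * vcpu_get_regs runs before register state is read out, so any live
 * FPU/MSA context is written back to the VCPU first via kvm_lose_fpu();
 * there is no equivalent work to do when state is loaded back in.
 */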
static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
	kvm_lose_fpu(vcpu);

	return 0;
}

static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}

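/*
 * The callback table that glues this trap-and-emulate implementation
 * into the generic MIPS KVM core: exit handlers for each guest exception
 * cause, plus setup, interrupt delivery and register access hooks.
 */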
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.num_regs = kvm_trap_emul_num_regs,
	.copy_reg_indices = kvm_trap_emul_copy_reg_indices,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
	.vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};

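/*
 * Called by the generic MIPS KVM code at initialisation time to install
 * the trap-and-emulate callbacks.
 */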
int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}