trap_emul.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>

#include "opcode.h"
#include "interrupt.h"
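
/*
 * Guest virtual to guest physical translation for the trap & emulate style
 * of virtualization: KSEG0/KSEG1 are unmapped windows onto physical memory,
 * so CPHYSADDR() simply strips the segment bits. Mapped addresses cannot be
 * resolved here and yield KVM_INVALID_ADDR.
 */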
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
	gpa_t gpa;
	uint32_t kseg = KSEGX(gva);

	if ((kseg == CKSEG0) || (kseg == CKSEG1))
		gpa = CPHYSADDR(gva);
	else {
		kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
		kvm_mips_dump_host_tlbs();
		gpa = KVM_INVALID_ADDR;
	}

	kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

	return gpa;
}
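
/**
 * kvm_trap_emul_handle_cop_unusable() - Guest used an unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * The Cause.CE field identifies the coprocessor that trapped. For COP1 the
 * exception is either delivered to the guest (no FPU, or guest Status.CU1
 * clear) or handled by giving the guest the hardware FPU. Any other CE value
 * (normally 0, a privileged COP0 instruction) goes to the instruction
 * emulator.
 */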
static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/* FPU Unusable */
		if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
		    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
			/*
			 * Unusable/no FPU in guest:
			 * deliver guest COP1 Unusable Exception
			 */
			er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
		} else {
			/* Restore FPU state */
			kvm_own_fpu(vcpu);
			er = EMULATE_DONE;
		}
	} else {
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	case EMULATE_WAIT:
		run->exit_reason = KVM_EXIT_INTR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
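
/**
 * kvm_trap_emul_handle_tlb_mod() - Guest wrote to a clean/read-only page.
 * @vcpu:	Virtual CPU context.
 *
 * TLB modified faults on user and KSEG23 addresses are passed to the
 * TLB-mod emulator; anything else is unexpected here and terminates the VM
 * with an internal error.
 */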
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);

		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * XXXKYMA: The guest kernel does not expect to get this fault
		 * when we are not using HIGHMEM. Need to address this in a
		 * HIGHMEM kernel
		 */
		kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	} else {
		kvm_err("Illegal TLB Mod fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
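
/**
 * kvm_trap_emul_handle_tlb_st_miss() - Guest store missed in the host TLB.
 * @vcpu:	Virtual CPU context.
 *
 * Comm page faults are repaired directly, user/KSEG23 misses go through the
 * TLB miss emulator, and KSEG0 misses are filled by KVM itself since the
 * guest kernel never expects to fault there.
 */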
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB ST fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB ST fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
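
/**
 * kvm_trap_emul_handle_tlb_ld_miss() - Guest load or instruction fetch
 * missed in the host TLB.
 * @vcpu:	Virtual CPU context.
 *
 * Mirrors the store-miss handler above for TLBL exceptions.
 */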
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB LD fault: PC: %#lx, BadVaddr: %#lx\n",
			  vcpu->arch.pc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */
		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault
		    (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Illegal TLB LD fault address, cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
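
/**
 * kvm_trap_emul_handle_addr_err_st() - Guest store raised an address error.
 * @vcpu:	Virtual CPU context.
 *
 * A kernel-mode store into CKSEG0/CKSEG1 is taken to be an MMIO access and
 * is emulated, completing in userspace via KVM_EXIT_MMIO; anything else is
 * an internal error.
 */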
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KVM_GUEST_KERNEL_MODE(vcpu)
	    && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		kvm_debug("Emulate Store to MMIO space\n");
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Store to MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
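
/**
 * kvm_trap_emul_handle_addr_err_ld() - Guest load raised an address error.
 * @vcpu:	Virtual CPU context.
 *
 * As with stores, a load from CKSEG0/CKSEG1 is treated as MMIO.
 */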
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
		kvm_debug("Emulate Load from MMIO space @ %#lx\n", badvaddr);
		er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Emulate Load from MMIO space failed\n");
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		} else {
			run->exit_reason = KVM_EXIT_MMIO;
			ret = RESUME_HOST;
		}
	} else {
		kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
			cause, opc, badvaddr);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		er = EMULATE_FAIL;
	}
	return ret;
}
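
/*
 * The handlers below all follow one pattern: emulate or re-deliver the
 * exception to the guest, resume the guest on EMULATE_DONE, and otherwise
 * report an internal error to userspace.
 */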
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_handle_ri(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE)
		ret = RESUME_GUEST;
	else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
	unsigned long cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
		/*
		 * No MSA in guest, or FPU enabled and not in FR=1 mode,
		 * guest reserved instruction exception
		 */
		er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
	} else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
		/* MSA disabled by guest, guest MSA disabled exception */
		er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
	} else {
		/* Restore MSA/FPU state */
		kvm_own_msa(vcpu);
		er = EMULATE_DONE;
	}

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}
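
/* Trap & emulate keeps no private VM or vCPU state, so init is trivial. */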
static int kvm_trap_emul_vm_init(struct kvm *kvm)
{
	return 0;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}
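
/**
 * kvm_trap_emul_vcpu_setup() - Initialise the guest COP0 register state.
 * @vcpu:	Virtual CPU context.
 *
 * The guest sees a 24Kc-like CPU: PRId 0x00019300, an R4000-style TLB of
 * KVM_MIPS_GUEST_TLB_SIZE entries (Config1 bits 30:25 hold entries - 1),
 * and a UserLocal register advertised via Config3.ULRI.
 */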
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t config1;
	int vcpu_id = vcpu->vcpu_id;

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected, for now we simulate a MIPS 24kc
	 */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
	/* Have config1, Cacheable, noncoherent, write-back, write allocate */
	kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
				  (0x1 << CP0C0_AR) |
				  (MMU_TYPE_R4000 << CP0C0_MT));

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &=
	    ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
	      (1 << CP0C1_WR) | (1 << CP0C1_CA));
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));

	return 0;
}
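
/*
 * Only registers needing special handling are implemented here; the generic
 * MIPS one_reg code is expected to cover the common registers before falling
 * back to these callbacks. Count is derived from the running timer rather
 * than from stored state.
 */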
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
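
/*
 * Writes with side effects need care: Count/Compare go through the timer
 * code, Cause.DC changes must not race a timer interrupt, and the Config
 * registers are filtered through their per-register writable-bit masks.
 */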
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
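
/*
 * Any live FPU/MSA context must be written back to the vCPU state before
 * userspace reads the registers, hence kvm_lose_fpu() here.
 */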
static int kvm_trap_emul_vcpu_get_regs(struct kvm_vcpu *vcpu)
{
	kvm_lose_fpu(vcpu);

	return 0;
}

static int kvm_trap_emul_vcpu_set_regs(struct kvm_vcpu *vcpu)
{
	return 0;
}
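
/* Wire the trap & emulate implementation into the generic MIPS KVM core. */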
static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	/* exit handlers */
	.handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
	.handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
	.handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
	.handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
	.handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
	.handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
	.handle_syscall = kvm_trap_emul_handle_syscall,
	.handle_res_inst = kvm_trap_emul_handle_res_inst,
	.handle_break = kvm_trap_emul_handle_break,
	.handle_trap = kvm_trap_emul_handle_trap,
	.handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
	.handle_fpe = kvm_trap_emul_handle_fpe,
	.handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,

	.vm_init = kvm_trap_emul_vm_init,
	.vcpu_init = kvm_trap_emul_vcpu_init,
	.vcpu_setup = kvm_trap_emul_vcpu_setup,
	.gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
	.queue_timer_int = kvm_mips_queue_timer_int_cb,
	.dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
	.queue_io_int = kvm_mips_queue_io_int_cb,
	.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
	.irq_deliver = kvm_mips_irq_deliver_cb,
	.irq_clear = kvm_mips_irq_clear_cb,
	.get_one_reg = kvm_trap_emul_get_one_reg,
	.set_one_reg = kvm_trap_emul_set_one_reg,
	.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
	.vcpu_set_regs = kvm_trap_emul_vcpu_set_regs,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}