trap_emul.c

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"

static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
        gpa_t gpa;
        gva_t kseg = KSEGX(gva);
        gva_t gkseg = KVM_GUEST_KSEGX(gva);

        if ((kseg == CKSEG0) || (kseg == CKSEG1))
                gpa = CPHYSADDR(gva);
        else if (gkseg == KVM_GUEST_KSEG0)
                gpa = KVM_GUEST_CPHYSADDR(gva);
        else {
                kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
                kvm_mips_dump_host_tlbs();
                gpa = KVM_INVALID_ADDR;
        }

        kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);

        return gpa;
}
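
/*
 * Worked example of the mapping above (a sketch; the exact guest segment
 * bases depend on the trap & emulate layout, where guest KSEG0 is typically
 * remapped to 0x40000000):
 *
 *      GVA 0x80001000 (CKSEG0)       -> GPA 0x00001000 via CPHYSADDR()
 *      GVA 0xa0001000 (CKSEG1)       -> GPA 0x00001000 via CPHYSADDR()
 *      GVA 0x40001000 (guest KSEG0)  -> GPA 0x00001000 via KVM_GUEST_CPHYSADDR()
 *      anything else                 -> KVM_INVALID_ADDR (and the host TLBs
 *                                       are dumped for debugging)
 */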

static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu)
{
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 inst = 0;

        /*
         * Fetch the instruction.
         */
        if (cause & CAUSEF_BD)
                opc += 1;
        kvm_get_badinstr(opc, vcpu, &inst);

        kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
                exccode, opc, inst, badvaddr,
                kvm_read_c0_guest_status(vcpu->arch.cop0));
        kvm_arch_vcpu_dump_regs(vcpu);
        vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        return RESUME_HOST;
}

static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
                /* FPU Unusable */
                if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
                    (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
                        /*
                         * Unusable/no FPU in guest:
                         * deliver guest COP1 Unusable Exception
                         */
                        er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
                } else {
                        /* Restore FPU state */
                        kvm_own_fpu(vcpu);
                        er = EMULATE_DONE;
                }
        } else {
                er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        case EMULATE_WAIT:
                run->exit_reason = KVM_EXIT_INTR;
                ret = RESUME_HOST;
                break;

        case EMULATE_HYPERCALL:
                ret = kvm_mips_handle_hypcall(vcpu);
                break;

        default:
                BUG();
        }
        return ret;
}

static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
                             struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        int err;

        /* A code fetch fault doesn't count as an MMIO */
        if (kvm_is_ifetch_fault(&vcpu->arch)) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Emulate the load */
        er = kvm_mips_emulate_load(inst, cause, run, vcpu);
        if (er == EMULATE_FAIL) {
                kvm_err("Emulate load from MMIO space failed\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        } else {
                run->exit_reason = KVM_EXIT_MMIO;
        }
        return RESUME_HOST;
}

static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
                              struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        union mips_instruction inst;
        int err;

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;
        err = kvm_get_badinstr(opc, vcpu, &inst.word);
        if (err) {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return RESUME_HOST;
        }

        /* Emulate the store */
        er = kvm_mips_emulate_store(inst, cause, run, vcpu);
        if (er == EMULATE_FAIL) {
                kvm_err("Emulate store to MMIO space failed\n");
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
        } else {
                run->exit_reason = KVM_EXIT_MMIO;
        }
        return RESUME_HOST;
}

static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
                               struct kvm_vcpu *vcpu, bool store)
{
        if (store)
                return kvm_mips_bad_store(cause, opc, run, vcpu);
        else
                return kvm_mips_bad_load(cause, opc, run, vcpu);
}

static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        struct kvm_mips_tlb *tlb;
        unsigned long entryhi;
        int index;

        if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
            || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                /*
                 * First find the mapping in the guest TLB. If the failure to
                 * write was due to the guest TLB, it should be up to the guest
                 * to handle it.
                 */
                entryhi = (badvaddr & VPN2_MASK) |
                          (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
                index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

                /*
                 * These should never happen.
                 * They would indicate stale host TLB entries.
                 */
                if (unlikely(index < 0)) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }
                tlb = vcpu->arch.guest_tlb + index;
                if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }

                /*
                 * Guest entry not dirty? That would explain the TLB modified
                 * exception. Relay that on to the guest so it can handle it.
                 */
                if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
                        kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
                        return RESUME_GUEST;
                }

                if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
                                                         true))
                        /* Not writable, needs handling as MMIO */
                        return kvm_mips_bad_store(cause, opc, run, vcpu);
                return RESUME_GUEST;
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
                        /* Not writable, needs handling as MMIO */
                        return kvm_mips_bad_store(cause, opc, run, vcpu);
                return RESUME_GUEST;
        } else {
                /* host kernel addresses are all handled as MMIO */
                return kvm_mips_bad_store(cause, opc, run, vcpu);
        }
}

static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
            && KVM_GUEST_KERNEL_MODE(vcpu)) {
                if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
                   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
                kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
                          store ? "ST" : "LD", cause, opc, badvaddr);

                /*
                 * User Address (UA) fault, this could happen if
                 * (1) TLB entry not present/valid in both Guest and shadow host
                 *     TLBs, in this case we pass on the fault to the guest
                 *     kernel and let it handle it.
                 * (2) TLB entry is present in the Guest TLB but not in the
                 *     shadow, in this case we inject the TLB from the Guest TLB
                 *     into the shadow host TLB
                 */
                er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
                if (er == EMULATE_DONE)
                        ret = RESUME_GUEST;
                else {
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        ret = RESUME_HOST;
                }
        } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
                /*
                 * All KSEG0 faults are handled by KVM, as the guest kernel does
                 * not expect to ever get them
                 */
                if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
                        ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
        } else if (KVM_GUEST_KERNEL_MODE(vcpu)
                   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                /*
                 * With EVA we may get a TLB exception instead of an address
                 * error when the guest performs MMIO to KSeg1 addresses.
                 */
                ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
        } else {
                kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
                        store ? "ST" : "LD", cause, opc, badvaddr);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}

static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
        return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}

static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        int ret = RESUME_GUEST;

        if (KVM_GUEST_KERNEL_MODE(vcpu)
            && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
                ret = kvm_mips_bad_store(cause, opc, run, vcpu);
        } else {
                kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
        u32 cause = vcpu->arch.host_cp0_cause;
        int ret = RESUME_GUEST;

        if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
                ret = kvm_mips_bad_load(cause, opc, run, vcpu);
        } else {
                kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
                        cause, opc, badvaddr);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_handle_ri(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE)
                ret = RESUME_GUEST;
        else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *)vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
        if (er == EMULATE_DONE) {
                ret = RESUME_GUEST;
        } else {
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
        }
        return ret;
}

/**
 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu: Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled.
 */
static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_run *run = vcpu->run;
        u32 __user *opc = (u32 __user *) vcpu->arch.pc;
        u32 cause = vcpu->arch.host_cp0_cause;
        enum emulation_result er = EMULATE_DONE;
        int ret = RESUME_GUEST;

        if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
            (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
                /*
                 * No MSA in guest, or FPU enabled and not in FR=1 mode,
                 * guest reserved instruction exception
                 */
                er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
        } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
                /* MSA disabled by guest, guest MSA disabled exception */
                er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
        } else {
                /* Restore MSA/FPU state */
                kvm_own_msa(vcpu);
                er = EMULATE_DONE;
        }

        switch (er) {
        case EMULATE_DONE:
                ret = RESUME_GUEST;
                break;

        case EMULATE_FAIL:
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = RESUME_HOST;
                break;

        default:
                BUG();
        }
        return ret;
}

static int kvm_trap_emul_hardware_enable(void)
{
        return 0;
}

static void kvm_trap_emul_hardware_disable(void)
{
}

static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
{
        int r;

        switch (ext) {
        case KVM_CAP_MIPS_TE:
                r = 1;
                break;
        default:
                r = 0;
                break;
        }

        return r;
}

static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

        /*
         * Allocate GVA -> HPA page tables.
         * MIPS doesn't use the mm_struct pointer argument.
         */
        kern_mm->pgd = pgd_alloc(kern_mm);
        if (!kern_mm->pgd)
                return -ENOMEM;

        user_mm->pgd = pgd_alloc(user_mm);
        if (!user_mm->pgd) {
                pgd_free(kern_mm, kern_mm->pgd);
                return -ENOMEM;
        }

        return 0;
}

static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
        /* Don't free host kernel page tables copied from init_mm.pgd */
        const unsigned long end = 0x80000000;
        unsigned long pgd_va, pud_va, pmd_va;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        int i, j, k;

        for (i = 0; i < USER_PTRS_PER_PGD; i++) {
                if (pgd_none(pgd[i]))
                        continue;
                pgd_va = (unsigned long)i << PGDIR_SHIFT;
                if (pgd_va >= end)
                        break;
                pud = pud_offset(pgd + i, 0);
                for (j = 0; j < PTRS_PER_PUD; j++) {
                        if (pud_none(pud[j]))
                                continue;
                        pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
                        if (pud_va >= end)
                                break;
                        pmd = pmd_offset(pud + j, 0);
                        for (k = 0; k < PTRS_PER_PMD; k++) {
                                if (pmd_none(pmd[k]))
                                        continue;
                                pmd_va = pud_va | (k << PMD_SHIFT);
                                if (pmd_va >= end)
                                        break;
                                pte = pte_offset(pmd + k, 0);
                                pte_free_kernel(NULL, pte);
                        }
                        pmd_free(NULL, pmd);
                }
                pud_free(NULL, pud);
        }
        pgd_free(NULL, pgd);
}

static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
        kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
}

static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        u32 config, config1;
        int vcpu_id = vcpu->vcpu_id;

        /* Start off the timer at 100 MHz */
        kvm_mips_init_count(vcpu, 100*1000*1000);

        /*
         * Arch specific stuff, set up config registers properly so that the
         * guest will come up as expected
         */
#ifndef CONFIG_CPU_MIPSR6
        /* r2-r5, simulate a MIPS 24kc */
        kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
        /* r6+, simulate a generic QEMU machine */
        kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif

        /*
         * Have config1, Cacheable, noncoherent, write-back, write allocate.
         * Endianness, arch revision & virtually tagged icache should match
         * host.
         */
        config = read_c0_config() & MIPS_CONF_AR;
        config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
        config |= CONF_BE;
#endif
        if (cpu_has_vtag_icache)
                config |= MIPS_CONF_VI;
        kvm_write_c0_guest_config(cop0, config);

        /* Read the cache characteristics from the host Config1 Register */
        config1 = (read_c0_config1() & ~0x7f);

        /* DCache line size not correctly reported in Config1 on Octeon CPUs */
        if (cpu_dcache_line_size()) {
                config1 &= ~MIPS_CONF1_DL;
                config1 |= ((ilog2(cpu_dcache_line_size()) - 1) <<
                            MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL;
        }

        /* Set up MMU size */
        config1 &= ~(0x3f << 25);
        config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

        /* We unset some bits that we aren't emulating */
        config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
                     MIPS_CONF1_WR | MIPS_CONF1_CA);
        kvm_write_c0_guest_config1(cop0, config1);

        /* Have config3, no tertiary/secondary caches implemented */
        kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
        /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

        /* Have config4, UserLocal */
        kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

        /* Have config5 */
        kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

        /* No config6 */
        kvm_write_c0_guest_config5(cop0, 0);

        /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
        kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

        /* Status */
        kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

        /*
         * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
         */
        kvm_write_c0_guest_intctl(cop0, 0xFC000000);

        /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
        kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
                                       (vcpu_id & MIPS_EBASE_CPUNUM));

        /* Put PC at guest reset vector */
        vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

        return 0;
}
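
/*
 * Worked example for the Config1 MMU size setup above (a sketch): the field
 * in bits [30:25] holds "number of TLB entries - 1", so with the usual
 * KVM_MIPS_GUEST_TLB_SIZE of 64 the guest reads back 63 there and sees a
 * 64-entry TLB. The actual value comes from the KVM_MIPS_GUEST_TLB_SIZE
 * definition in the MIPS KVM headers.
 */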

static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
        /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */
        kvm_flush_remote_tlbs(kvm);
}

static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
                                        const struct kvm_memory_slot *slot)
{
        kvm_trap_emul_flush_shadow_all(kvm);
}

static u64 kvm_trap_emul_get_one_regs[] = {
        KVM_REG_MIPS_CP0_INDEX,
        KVM_REG_MIPS_CP0_ENTRYLO0,
        KVM_REG_MIPS_CP0_ENTRYLO1,
        KVM_REG_MIPS_CP0_CONTEXT,
        KVM_REG_MIPS_CP0_USERLOCAL,
        KVM_REG_MIPS_CP0_PAGEMASK,
        KVM_REG_MIPS_CP0_WIRED,
        KVM_REG_MIPS_CP0_HWRENA,
        KVM_REG_MIPS_CP0_BADVADDR,
        KVM_REG_MIPS_CP0_COUNT,
        KVM_REG_MIPS_CP0_ENTRYHI,
        KVM_REG_MIPS_CP0_COMPARE,
        KVM_REG_MIPS_CP0_STATUS,
        KVM_REG_MIPS_CP0_INTCTL,
        KVM_REG_MIPS_CP0_CAUSE,
        KVM_REG_MIPS_CP0_EPC,
        KVM_REG_MIPS_CP0_PRID,
        KVM_REG_MIPS_CP0_EBASE,
        KVM_REG_MIPS_CP0_CONFIG,
        KVM_REG_MIPS_CP0_CONFIG1,
        KVM_REG_MIPS_CP0_CONFIG2,
        KVM_REG_MIPS_CP0_CONFIG3,
        KVM_REG_MIPS_CP0_CONFIG4,
        KVM_REG_MIPS_CP0_CONFIG5,
        KVM_REG_MIPS_CP0_CONFIG7,
        KVM_REG_MIPS_CP0_ERROREPC,
        KVM_REG_MIPS_CP0_KSCRATCH1,
        KVM_REG_MIPS_CP0_KSCRATCH2,
        KVM_REG_MIPS_CP0_KSCRATCH3,
        KVM_REG_MIPS_CP0_KSCRATCH4,
        KVM_REG_MIPS_CP0_KSCRATCH5,
        KVM_REG_MIPS_CP0_KSCRATCH6,

        KVM_REG_MIPS_COUNT_CTL,
        KVM_REG_MIPS_COUNT_RESUME,
        KVM_REG_MIPS_COUNT_HZ,
};

static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
        return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}

static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
                                          u64 __user *indices)
{
        if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
                         sizeof(kvm_trap_emul_get_one_regs)))
                return -EFAULT;
        indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

        return 0;
}

static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 *v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                *v = (long)kvm_read_c0_guest_index(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO0:
                *v = kvm_read_c0_guest_entrylo0(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO1:
                *v = kvm_read_c0_guest_entrylo1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                *v = (long)kvm_read_c0_guest_context(cop0);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                *v = (long)kvm_read_c0_guest_userlocal(cop0);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                *v = (long)kvm_read_c0_guest_pagemask(cop0);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                *v = (long)kvm_read_c0_guest_wired(cop0);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                *v = (long)kvm_read_c0_guest_hwrena(cop0);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                *v = (long)kvm_read_c0_guest_badvaddr(cop0);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                *v = (long)kvm_read_c0_guest_entryhi(cop0);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                *v = (long)kvm_read_c0_guest_compare(cop0);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                *v = (long)kvm_read_c0_guest_status(cop0);
                break;
        case KVM_REG_MIPS_CP0_INTCTL:
                *v = (long)kvm_read_c0_guest_intctl(cop0);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                *v = (long)kvm_read_c0_guest_cause(cop0);
                break;
        case KVM_REG_MIPS_CP0_EPC:
                *v = (long)kvm_read_c0_guest_epc(cop0);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                *v = (long)kvm_read_c0_guest_prid(cop0);
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                *v = (long)kvm_read_c0_guest_ebase(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                *v = (long)kvm_read_c0_guest_config(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                *v = (long)kvm_read_c0_guest_config1(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                *v = (long)kvm_read_c0_guest_config2(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                *v = (long)kvm_read_c0_guest_config3(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                *v = (long)kvm_read_c0_guest_config4(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                *v = (long)kvm_read_c0_guest_config5(cop0);
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                *v = (long)kvm_read_c0_guest_config7(cop0);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                *v = kvm_mips_read_count(vcpu);
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                *v = vcpu->arch.count_ctl;
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                *v = ktime_to_ns(vcpu->arch.count_resume);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                *v = vcpu->arch.count_hz;
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                *v = (long)kvm_read_c0_guest_errorepc(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1:
                *v = (long)kvm_read_c0_guest_kscratch1(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH2:
                *v = (long)kvm_read_c0_guest_kscratch2(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH3:
                *v = (long)kvm_read_c0_guest_kscratch3(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH4:
                *v = (long)kvm_read_c0_guest_kscratch4(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH5:
                *v = (long)kvm_read_c0_guest_kscratch5(cop0);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH6:
                *v = (long)kvm_read_c0_guest_kscratch6(cop0);
                break;
        default:
                return -EINVAL;
        }
        return 0;
}
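
/*
 * These accessors back the generic KVM_GET_ONE_REG / KVM_SET_ONE_REG vcpu
 * ioctls. A minimal userspace sketch (illustrative only; vcpu fd setup and
 * error handling omitted):
 *
 *      __u64 val;
 *      struct kvm_one_reg reg = {
 *              .id   = KVM_REG_MIPS_CP0_STATUS,
 *              .addr = (__u64)(unsigned long)&val,
 *      };
 *      ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * After the ioctl, val holds the guest Status register value.
 */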

static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
                                     const struct kvm_one_reg *reg,
                                     s64 v)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int ret = 0;
        unsigned int cur, change;

        switch (reg->id) {
        case KVM_REG_MIPS_CP0_INDEX:
                kvm_write_c0_guest_index(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO0:
                kvm_write_c0_guest_entrylo0(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYLO1:
                kvm_write_c0_guest_entrylo1(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_CONTEXT:
                kvm_write_c0_guest_context(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_USERLOCAL:
                kvm_write_c0_guest_userlocal(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PAGEMASK:
                kvm_write_c0_guest_pagemask(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_WIRED:
                kvm_write_c0_guest_wired(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_HWRENA:
                kvm_write_c0_guest_hwrena(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_BADVADDR:
                kvm_write_c0_guest_badvaddr(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_ENTRYHI:
                kvm_write_c0_guest_entryhi(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_STATUS:
                kvm_write_c0_guest_status(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_INTCTL:
                /* No VInt, so no VS, read-only for now */
                break;
        case KVM_REG_MIPS_CP0_EPC:
                kvm_write_c0_guest_epc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_PRID:
                kvm_write_c0_guest_prid(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_EBASE:
                /*
                 * Allow core number to be written, but the exception base must
                 * remain in guest KSeg0.
                 */
                kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
                                          v);
                break;
        case KVM_REG_MIPS_CP0_COUNT:
                kvm_mips_write_count(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_COMPARE:
                kvm_mips_write_compare(vcpu, v, false);
                break;
        case KVM_REG_MIPS_CP0_CAUSE:
                /*
                 * If the timer is stopped or started (DC bit) it must look
                 * atomic with changes to the interrupt pending bits (TI, IRQ5).
                 * A timer interrupt should not happen in between.
                 */
                if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
                        if (v & CAUSEF_DC) {
                                /* disable timer first */
                                kvm_mips_count_disable_cause(vcpu);
                                kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
                                                          v);
                        } else {
                                /* enable timer last */
                                kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC,
                                                          v);
                                kvm_mips_count_enable_cause(vcpu);
                        }
                } else {
                        kvm_write_c0_guest_cause(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG1:
                cur = kvm_read_c0_guest_config1(cop0);
                change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config1(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG2:
                /* read-only for now */
                break;
        case KVM_REG_MIPS_CP0_CONFIG3:
                cur = kvm_read_c0_guest_config3(cop0);
                change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config3(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG4:
                cur = kvm_read_c0_guest_config4(cop0);
                change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config4(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG5:
                cur = kvm_read_c0_guest_config5(cop0);
                change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
                if (change) {
                        v = cur ^ change;
                        kvm_write_c0_guest_config5(cop0, v);
                }
                break;
        case KVM_REG_MIPS_CP0_CONFIG7:
                /* writes ignored */
                break;
        case KVM_REG_MIPS_COUNT_CTL:
                ret = kvm_mips_set_count_ctl(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_RESUME:
                ret = kvm_mips_set_count_resume(vcpu, v);
                break;
        case KVM_REG_MIPS_COUNT_HZ:
                ret = kvm_mips_set_count_hz(vcpu, v);
                break;
        case KVM_REG_MIPS_CP0_ERROREPC:
                kvm_write_c0_guest_errorepc(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH1:
                kvm_write_c0_guest_kscratch1(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH2:
                kvm_write_c0_guest_kscratch2(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH3:
                kvm_write_c0_guest_kscratch3(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH4:
                kvm_write_c0_guest_kscratch4(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH5:
                kvm_write_c0_guest_kscratch5(cop0, v);
                break;
        case KVM_REG_MIPS_CP0_KSCRATCH6:
                kvm_write_c0_guest_kscratch6(cop0, v);
                break;
        default:
                return -EINVAL;
        }
        return ret;
}

static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;

        /*
         * Were we in guest context? If so, restore the appropriate ASID based
         * on the mode of the Guest (Kernel/User).
         */
        if (current->flags & PF_VCPU) {
                mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
                    asid_version_mask(cpu))
                        get_new_mmu_context(mm, cpu);
                write_c0_entryhi(cpu_asid(cpu, mm));
                TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
                kvm_mips_suspend_mm(cpu);
                ehb();
        }

        return 0;
}

static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
{
        kvm_lose_fpu(vcpu);

        if (current->flags & PF_VCPU) {
                /* Restore normal Linux process memory map */
                if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
                     asid_version_mask(cpu)))
                        get_new_mmu_context(current->mm, cpu);
                write_c0_entryhi(cpu_asid(cpu, current->mm));
                TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
                kvm_mips_resume_mm(cpu);
                ehb();
        }

        return 0;
}
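
/*
 * Note on the ASID check used in vcpu_load/vcpu_put above (and again in
 * vcpu_reenter/vcpu_run below): on MIPS an mm's per-CPU context value embeds
 * an ASID generation. If
 *
 *      (cpu_context(cpu, mm) ^ asid_cache(cpu)) & asid_version_mask(cpu)
 *
 * is non-zero, the recorded ASID belongs to an older generation, so
 * get_new_mmu_context() must allocate a fresh one before it is written to
 * EntryHi.
 */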

static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
                                         bool reload_asid)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;
        int i;

        if (likely(!kvm_request_pending(vcpu)))
                return;

        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
                /*
                 * Both kernel & user GVA mappings must be invalidated. The
                 * caller is just about to check whether the ASID is stale
                 * anyway so no need to reload it here.
                 */
                kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
                kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
                for_each_possible_cpu(i) {
                        cpu_context(i, kern_mm) = 0;
                        cpu_context(i, user_mm) = 0;
                }

                /* Generate new ASID for current mode */
                if (reload_asid) {
                        mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
                        get_new_mmu_context(mm, cpu);
                        htw_stop();
                        write_c0_entryhi(cpu_asid(cpu, mm));
                        TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
                        htw_start();
                }
        }
}

/**
 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
 * @vcpu: VCPU pointer.
 *
 * Call before a GVA space access outside of guest mode, to ensure that
 * asynchronous TLB flush requests are handled or delayed until completion of
 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
 *
 * Should be called with IRQs already enabled.
 */
void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
{
        /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
        WARN_ON_ONCE(irqs_disabled());

        /*
         * The caller is about to access the GVA space, so we set the mode to
         * force TLB flush requests to send an IPI, and also disable IRQs to
         * delay IPI handling until kvm_trap_emul_gva_lockless_end().
         */
        local_irq_disable();

        /*
         * Make sure the read of VCPU requests is not reordered ahead of the
         * write to vcpu->mode, or we could miss a TLB flush request while
         * the requester sees the VCPU as outside of guest mode and not needing
         * an IPI.
         */
        smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);

        /*
         * If a TLB flush has been requested (potentially while
         * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
         * before accessing the GVA space, and be sure to reload the ASID if
         * necessary as it'll be immediately used.
         *
         * TLB flush requests after this check will trigger an IPI due to the
         * mode change above, which will be delayed due to IRQs disabled.
         */
        kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
}

/**
 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
 * @vcpu: VCPU pointer.
 *
 * Called after a GVA space access outside of guest mode. Should have a matching
 * call to kvm_trap_emul_gva_lockless_begin().
 */
void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
{
        /*
         * Make sure the write to vcpu->mode is not reordered in front of GVA
         * accesses, or a TLB flush requester may not think it necessary to send
         * an IPI.
         */
        smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);

        /*
         * Now that the access to GVA space is complete, it's safe for pending
         * TLB flush request IPIs to be handled (which indicates completion).
         */
        local_irq_enable();
}
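
/*
 * Typical usage of the begin/end pair above is to bracket a single guest
 * virtual address access made with the user accessors, e.g. (a sketch, not a
 * verbatim caller from this file):
 *
 *      kvm_trap_emul_gva_lockless_begin(vcpu);
 *      err = get_user(inst, (u32 __user *)opc);
 *      kvm_trap_emul_gva_lockless_end(vcpu);
 *
 * Any KVM_REQ_TLB_FLUSH raised while the access is in flight is either
 * handled in _begin() or deferred (IRQs are off) until _end().
 */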

static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
                                       struct kvm_vcpu *vcpu)
{
        struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
        struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
        struct mm_struct *mm;
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int i, cpu = smp_processor_id();
        unsigned int gasid;

        /*
         * No need to reload ASID, IRQs are disabled already so there's no rush,
         * and we'll check if we need to regenerate below anyway before
         * re-entering the guest.
         */
        kvm_trap_emul_check_requests(vcpu, cpu, false);

        if (KVM_GUEST_KERNEL_MODE(vcpu)) {
                mm = kern_mm;
        } else {
                mm = user_mm;

                /*
                 * Lazy host ASID regeneration / PT flush for guest user mode.
                 * If the guest ASID has changed since the last guest usermode
                 * execution, invalidate the stale TLB entries and flush GVA PT
                 * entries too.
                 */
                gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
                if (gasid != vcpu->arch.last_user_gasid) {
                        kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
                        for_each_possible_cpu(i)
                                cpu_context(i, user_mm) = 0;
                        vcpu->arch.last_user_gasid = gasid;
                }
        }

        /*
         * Check if ASID is stale. This may happen due to a TLB flush request or
         * a lazy user MM invalidation.
         */
        if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
            asid_version_mask(cpu))
                get_new_mmu_context(mm, cpu);
}

static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        int cpu = smp_processor_id();
        int r;

        /* Check if we have any exceptions/interrupts pending */
        kvm_mips_deliver_interrupts(vcpu,
                                    kvm_read_c0_guest_cause(vcpu->arch.cop0));

        kvm_trap_emul_vcpu_reenter(run, vcpu);

        /*
         * We use user accessors to access guest memory, but we don't want to
         * invoke Linux page faulting.
         */
        pagefault_disable();

        /* Disable hardware page table walking while in guest */
        htw_stop();

        /*
         * While in guest context we're in the guest's address space, not the
         * host process address space, so we need to be careful not to confuse
         * e.g. cache management IPIs.
         */
        kvm_mips_suspend_mm(cpu);

        r = vcpu->arch.vcpu_run(run, vcpu);

        /* We may have migrated while handling guest exits */
        cpu = smp_processor_id();

        /* Restore normal Linux process memory map */
        if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
             asid_version_mask(cpu)))
                get_new_mmu_context(current->mm, cpu);
        write_c0_entryhi(cpu_asid(cpu, current->mm));
        TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
        kvm_mips_resume_mm(cpu);

        htw_start();

        pagefault_enable();

        return r;
}

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
        /* exit handlers */
        .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
        .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
        .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
        .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
        .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
        .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
        .handle_syscall = kvm_trap_emul_handle_syscall,
        .handle_res_inst = kvm_trap_emul_handle_res_inst,
        .handle_break = kvm_trap_emul_handle_break,
        .handle_trap = kvm_trap_emul_handle_trap,
        .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
        .handle_fpe = kvm_trap_emul_handle_fpe,
        .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
        .handle_guest_exit = kvm_trap_emul_no_handler,

        .hardware_enable = kvm_trap_emul_hardware_enable,
        .hardware_disable = kvm_trap_emul_hardware_disable,
        .check_extension = kvm_trap_emul_check_extension,
        .vcpu_init = kvm_trap_emul_vcpu_init,
        .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
        .vcpu_setup = kvm_trap_emul_vcpu_setup,
        .flush_shadow_all = kvm_trap_emul_flush_shadow_all,
        .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
        .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
        .queue_timer_int = kvm_mips_queue_timer_int_cb,
        .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
        .queue_io_int = kvm_mips_queue_io_int_cb,
        .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
        .irq_deliver = kvm_mips_irq_deliver_cb,
        .irq_clear = kvm_mips_irq_clear_cb,
        .num_regs = kvm_trap_emul_num_regs,
        .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
        .get_one_reg = kvm_trap_emul_get_one_reg,
        .set_one_reg = kvm_trap_emul_set_one_reg,
        .vcpu_load = kvm_trap_emul_vcpu_load,
        .vcpu_put = kvm_trap_emul_vcpu_put,
        .vcpu_run = kvm_trap_emul_vcpu_run,
        .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
        *install_callbacks = &kvm_trap_emul_callbacks;
        return 0;
}
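
/*
 * Installation sketch (the caller lives outside this file, so the details
 * here are illustrative): the MIPS KVM core calls kvm_mips_emulation_init()
 * during architecture init with the address of its callbacks pointer,
 * roughly:
 *
 *      struct kvm_mips_callbacks *kvm_mips_callbacks;
 *      ...
 *      ret = kvm_mips_emulation_init(&kvm_mips_callbacks);
 *
 * after which every trap-and-emulate exit handler above is reached through
 * that table.
 */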