handle_exit.c

/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/handle_exit.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
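
/*
 * Exit handling for KVM/arm64: dispatch exits taken from the guest
 * (traps, interrupts) to the appropriate handler, or hand the exit
 * back to userspace when it cannot be resolved in the kernel.
 */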

#include <linux/kvm.h>
#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_psci.h>

#define CREATE_TRACE_POINTS
#include "trace.h"

typedef int (*exit_handle_fn)(struct kvm_vcpu *, struct kvm_run *);
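
/*
 * A guest HVC is how the guest requests hypervisor services (here, the
 * PSCI emulation). A call that the PSCI layer rejects results in an
 * UNDEF exception being injected back into the guest.
 */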
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret;

        trace_kvm_hvc_arm64(*vcpu_pc(vcpu), *vcpu_reg(vcpu, 0),
                            kvm_vcpu_hvc_get_imm(vcpu));

        ret = kvm_psci_call(vcpu);
        if (ret < 0) {
                kvm_inject_undefined(vcpu);
                return 1;
        }

        return ret;
}
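
/*
 * Guest SMC calls are not forwarded anywhere; an UNDEF exception is
 * injected so the guest sees the instruction as unsupported.
 */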
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        kvm_inject_undefined(vcpu);
        return 1;
}

/**
 * kvm_handle_wfx - handle a wait-for-interrupts or wait-for-event
 *                  instruction executed by a guest
 *
 * @vcpu:       the vcpu pointer
 *
 * WFE: Yield the CPU and come back to this vcpu when the scheduler
 * decides to.
 * WFI: Simply call kvm_vcpu_block(), which will halt execution of
 * world-switches and schedule other host processes until there is an
 * incoming IRQ or FIQ to the VM.
 */
static int kvm_handle_wfx(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        if (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WFx_ISS_WFE) {
                trace_kvm_wfx_arm64(*vcpu_pc(vcpu), true);
                kvm_vcpu_on_spin(vcpu);
        } else {
                trace_kvm_wfx_arm64(*vcpu_pc(vcpu), false);
                kvm_vcpu_block(vcpu);
        }

        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

        return 1;
}
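
/*
 * Exit handlers, indexed by the exception class (EC) field of the ESR
 * recorded for the trap that caused the exit.
 */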
static exit_handle_fn arm_exit_handlers[] = {
        [ESR_ELx_EC_WFx]        = kvm_handle_wfx,
        [ESR_ELx_EC_CP15_32]    = kvm_handle_cp15_32,
        [ESR_ELx_EC_CP15_64]    = kvm_handle_cp15_64,
        [ESR_ELx_EC_CP14_MR]    = kvm_handle_cp14_32,
        [ESR_ELx_EC_CP14_LS]    = kvm_handle_cp14_load_store,
        [ESR_ELx_EC_CP14_64]    = kvm_handle_cp14_64,
        [ESR_ELx_EC_HVC32]      = handle_hvc,
        [ESR_ELx_EC_SMC32]      = handle_smc,
        [ESR_ELx_EC_HVC64]      = handle_hvc,
        [ESR_ELx_EC_SMC64]      = handle_smc,
        [ESR_ELx_EC_SYS64]      = kvm_handle_sys_reg,
        [ESR_ELx_EC_IABT_LOW]   = kvm_handle_guest_abort,
        [ESR_ELx_EC_DABT_LOW]   = kvm_handle_guest_abort,
};
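
/*
 * Look up the handler for the exception class encoded in the vcpu's
 * saved ESR. A class with no registered handler indicates a bug in the
 * trap configuration, hence the BUG().
 */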
static exit_handle_fn kvm_get_exit_handler(struct kvm_vcpu *vcpu)
{
        u32 hsr = kvm_vcpu_get_hsr(vcpu);
        u8 hsr_ec = hsr >> ESR_ELx_EC_SHIFT;

        if (hsr_ec >= ARRAY_SIZE(arm_exit_handlers) ||
            !arm_exit_handlers[hsr_ec]) {
                kvm_err("Unknown exception class: hsr: %#08x -- %s\n",
                        hsr, esr_get_class_string(hsr));
                BUG();
        }

        return arm_exit_handlers[hsr_ec];
}

/*
 * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
 * proper exit to userspace.
 */
int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
                int exception_index)
{
        exit_handle_fn exit_handler;

        switch (exception_index) {
        case ARM_EXCEPTION_IRQ:
                return 1;
        case ARM_EXCEPTION_TRAP:
                /*
                 * See ARM ARM B1.14.1: "Hyp traps on instructions
                 * that fail their condition code check"
                 */
                if (!kvm_condition_valid(vcpu)) {
                        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                        return 1;
                }

                exit_handler = kvm_get_exit_handler(vcpu);

                return exit_handler(vcpu, run);
        default:
                kvm_pr_unimpl("Unsupported exception type: %d",
                              exception_index);
                run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return 0;
        }
}