inject_fault.c

/*
 * Fault injection for both 32 and 64bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/esr.h>

#define PSTATE_FAULT_BITS_64	(PSR_MODE_EL1h | PSR_A_BIT | PSR_F_BIT | \
				 PSR_I_BIT | PSR_D_BIT)

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600
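
/*
 * These offsets select one quarter of the AArch64 vector table at
 * VBAR_EL1: which quarter is used depends on the exception level and
 * register width the exception is taken from.
 */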

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 */
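/*
 * Column 0 holds the return-address offset for ARM state, column 1 for
 * Thumb state; the offset is added to the faulting PC to form the
 * banked LR of the target mode.
 */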
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};
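
/*
 * Switch the vcpu into the given AArch32 exception mode: build a new
 * CPSR honouring SCTLR.TE (bit 30, exceptions taken in Thumb state)
 * and SCTLR.EE (bit 25, exception endianness), bank the old CPSR into
 * the mode's SPSR, set the return address in the banked LR, and branch
 * to the vector, using the high vectors at 0xffff0000 when SCTLR.V
 * (bit 13) is set.
 */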
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	cpsr = mode | COMPAT_PSR_I_BIT;

	if (sctlr & (1 << 30))
		cpsr |= COMPAT_PSR_T_BIT;
	if (sctlr & (1 << 25))
		cpsr |= COMPAT_PSR_E_BIT;

	*vcpu_cpsr(vcpu) = cpsr;

	/* Note: These now point to the banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}
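
/* The AArch32 Undefined Instruction vector lives at offset 0x4. */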
static void inject_undef32(struct kvm_vcpu *vcpu)
{
	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !is_pabt, i.e. a data abort */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);	/* TTBCR.EAE selects LPAE */
	if (is_lpae)
		*fsr = 1 << 9 | 0x34;	/* bit 9 flags the long-descriptor format */
	else
		*fsr = 0x14;
}
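
/*
 * Offsets of the individual vector entries within each quarter of the
 * AArch64 vector table.
 */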
enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
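
/*
 * Pick the vector-table quarter from the mode the guest was in when the
 * exception is injected, then add the per-type entry offset to the base
 * address held in VBAR_EL1.
 */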
static u64 get_except_vector(struct kvm_vcpu *vcpu, enum exception_type type)
{
	u64 exc_offset;

	switch (*vcpu_cpsr(vcpu) & (PSR_MODE_MASK | PSR_MODE32_BIT)) {
	case PSR_MODE_EL1t:
		exc_offset = CURRENT_EL_SP_EL0_VECTOR;
		break;
	case PSR_MODE_EL1h:
		exc_offset = CURRENT_EL_SP_ELx_VECTOR;
		break;
	case PSR_MODE_EL0t:
		exc_offset = LOWER_EL_AArch64_VECTOR;
		break;
	default:
		exc_offset = LOWER_EL_AArch32_VECTOR;
	}

	return vcpu_sys_reg(vcpu, VBAR_EL1) + exc_offset + type;
}
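
/*
 * Inject a synchronous external abort into an AArch64 guest: save the
 * PC to ELR_EL1, vector to the synchronous exception entry with all
 * exceptions masked, and describe the fault in FAR_EL1 and ESR_EL1.
 */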
static void inject_abt64(struct kvm_vcpu *vcpu, bool is_iabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_aarch32 = vcpu_mode_is_32bit(vcpu);
	u32 esr = 0;

	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	vcpu_sys_reg(vcpu, FAR_EL1) = addr;

	/*
	 * Build an {i,d}abort, depending on the level and the
	 * instruction set. Report an external synchronous abort.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	/*
	 * Here, the guest runs in AArch64 mode when in EL1. If we get
	 * an AArch32 fault, it means we managed to trap an EL0 fault.
	 */
	if (is_aarch32 || (cpsr & PSR_MODE_MASK) == PSR_MODE_EL0t)
		esr |= (ESR_ELx_EC_IABT_LOW << ESR_ELx_EC_SHIFT);
	else
		esr |= (ESR_ELx_EC_IABT_CUR << ESR_ELx_EC_SHIFT);
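
	/*
	 * ESR_ELx_EC_DABT_LOW differs from ESR_ELx_EC_IABT_LOW by one
	 * extra bit (and likewise for the _CUR variants), so OR-ing it
	 * in converts whichever instruction-abort class was chosen
	 * above into the matching data-abort class.
	 */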
	if (!is_iabt)
		esr |= ESR_ELx_EC_DABT_LOW << ESR_ELx_EC_SHIFT;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr | ESR_ELx_FSC_EXTABT;
}
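
/*
 * Inject an Undefined Instruction exception into an AArch64 guest: the
 * ESR carries EC "Unknown reason", with the IL bit reflecting the width
 * of the trapped instruction.
 */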
static void inject_undef64(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 esr = (ESR_ELx_EC_UNKNOWN << ESR_ELx_EC_SHIFT);

	*vcpu_elr_el1(vcpu) = *vcpu_pc(vcpu);
	*vcpu_pc(vcpu) = get_except_vector(vcpu, except_type_sync);

	*vcpu_cpsr(vcpu) = PSTATE_FAULT_BITS_64;
	*vcpu_spsr(vcpu) = cpsr;

	/*
	 * Build an unknown exception, depending on the instruction
	 * set.
	 */
	if (kvm_vcpu_trap_il_is32bit(vcpu))
		esr |= ESR_ELx_IL;

	vcpu_sys_reg(vcpu, ESR_EL1) = esr;
}

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	/* HCR_EL2.RW is clear when the guest's EL1 is AArch32 */
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, false, addr);
	else
		inject_abt64(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_abt32(vcpu, true, addr);
	else
		inject_abt64(vcpu, true, addr);
}

/**
 * kvm_inject_undefined - inject an undefined instruction into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.hcr_el2 & HCR_RW))
		inject_undef32(vcpu);
	else
		inject_undef64(vcpu);
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	/*
	 * Setting HCR_EL2.VSE marks a virtual SError as pending; the CPU
	 * delivers it to the guest once PSTATE.A no longer masks it.
	 */
	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
}
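
/*
 * Usage sketch (hypothetical caller, not part of this file): an exit
 * handler that fails to emulate a guest access can reflect the fault
 * back into the guest, e.g.:
 *
 *	if (!handle_mmio_access(vcpu, fault_ipa))
 *		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 *
 * handle_mmio_access() and fault_ipa are illustrative names only;
 * kvm_inject_dabt() then picks the 32-bit or 64-bit injection path
 * based on HCR_EL2.RW.
 */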