emulate.c

/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5

#define REG_OFFSET(_reg) \
        (offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])

static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
        /* USR/SYS Registers */
        [VCPU_REG_OFFSET_USR] = {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
                USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
                USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
        },

        /* FIQ Registers */
        [VCPU_REG_OFFSET_FIQ] = {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7),
                REG_OFFSET(fiq_regs[0]), /* r8 */
                REG_OFFSET(fiq_regs[1]), /* r9 */
                REG_OFFSET(fiq_regs[2]), /* r10 */
                REG_OFFSET(fiq_regs[3]), /* r11 */
                REG_OFFSET(fiq_regs[4]), /* r12 */
                REG_OFFSET(fiq_regs[5]), /* r13 */
                REG_OFFSET(fiq_regs[6]), /* r14 */
        },

        /* IRQ Registers */
        [VCPU_REG_OFFSET_IRQ] = {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
                USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
                USR_REG_OFFSET(12),
                REG_OFFSET(irq_regs[0]), /* r13 */
                REG_OFFSET(irq_regs[1]), /* r14 */
        },

        /* SVC Registers */
        [VCPU_REG_OFFSET_SVC] = {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
                USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
                USR_REG_OFFSET(12),
                REG_OFFSET(svc_regs[0]), /* r13 */
                REG_OFFSET(svc_regs[1]), /* r14 */
        },

        /* ABT Registers */
        [VCPU_REG_OFFSET_ABT] = {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
                USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
                USR_REG_OFFSET(12),
                REG_OFFSET(abt_regs[0]), /* r13 */
                REG_OFFSET(abt_regs[1]), /* r14 */
        },

        /* UND Registers */
        [VCPU_REG_OFFSET_UND] = {
                USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
                USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
                USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
                USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
                USR_REG_OFFSET(12),
                REG_OFFSET(und_regs[0]), /* r13 */
                REG_OFFSET(und_regs[1]), /* r14 */
        },
};
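
/*
 * Worked example of the mapping above: in SVC mode, register 13 (the
 * banked stack pointer) maps to REG_OFFSET(svc_regs[0]), i.e. the byte
 * offset of svc_regs[0] inside struct kvm_regs divided by sizeof(u32),
 * so that indexing the register file as an array of words lands on the
 * banked SVC copy rather than the user-mode r13.
 */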

/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
        unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
        unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

        switch (mode) {
        case USR_MODE ... SVC_MODE:
                mode &= ~MODE32_BIT; /* 0 ... 3 */
                break;

        case ABT_MODE:
                mode = VCPU_REG_OFFSET_ABT;
                break;

        case UND_MODE:
                mode = VCPU_REG_OFFSET_UND;
                break;

        case SYSTEM_MODE:
                mode = VCPU_REG_OFFSET_USR;
                break;

        default:
                BUG();
        }

        return reg_array + vcpu_reg_offsets[mode][reg_num];
}
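
/*
 * Usage sketch (illustrative, mirroring the injection paths further
 * down): the banked link register of the mode being entered is updated
 * by writing through this helper, e.g.
 *
 *      *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 */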

/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
{
        unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

        switch (mode) {
        case SVC_MODE:
                return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
        case ABT_MODE:
                return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
        case UND_MODE:
                return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
        case IRQ_MODE:
                return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
        case FIQ_MODE:
                return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
        default:
                BUG();
        }
}

/******************************************************************************
 * Inject exceptions into the guest
 */

static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
        u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
        u32 vbar = vcpu_cp15(vcpu, c12_VBAR);

        if (sctlr & SCTLR_V)
                return 0xffff0000;
        else /* always have security exceptions */
                return vbar;
}
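
/*
 * Note on the above: SCTLR.V selects the legacy "high vectors" at
 * 0xffff0000; with V clear the vector base comes from VBAR, which is
 * what the emulated guest sees here.
 */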

/*
 * Switch to an exception mode, updating both CPSR and SPSR. Follow
 * the logic described in AArch32.EnterMode() from the ARMv8 ARM.
 */
static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
{
        unsigned long cpsr = *vcpu_cpsr(vcpu);
        u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

        *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;

        switch (mode) {
        case FIQ_MODE:
                *vcpu_cpsr(vcpu) |= PSR_F_BIT;
                /* Fall through */
        case ABT_MODE:
        case IRQ_MODE:
                *vcpu_cpsr(vcpu) |= PSR_A_BIT;
                /* Fall through */
        default:
                *vcpu_cpsr(vcpu) |= PSR_I_BIT;
        }

        *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

        if (sctlr & SCTLR_TE)
                *vcpu_cpsr(vcpu) |= PSR_T_BIT;
        if (sctlr & SCTLR_EE)
                *vcpu_cpsr(vcpu) |= PSR_E_BIT;

        /* Note: These now point to the mode banked copies */
        *vcpu_spsr(vcpu) = cpsr;
}
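
/*
 * Example of the transformation above (a sketch, assuming SCTLR.TE and
 * SCTLR.EE are both clear): a guest in USR mode in Thumb state that takes
 * an undefined-instruction exception ends up in UND mode with IRQs masked
 * (I set), the IT/J/E/T bits cleared (so the vector is entered in ARM,
 * little-endian state), and its pre-exception CPSR preserved in SPSR_und.
 */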

/**
 * kvm_inject_undefined - inject an undefined exception into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Modelled after TakeUndefInstrException() pseudocode.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
        unsigned long cpsr = *vcpu_cpsr(vcpu);
        bool is_thumb = (cpsr & PSR_T_BIT);
        u32 vect_offset = 4;
        u32 return_offset = (is_thumb) ? 2 : 4;

        kvm_update_psr(vcpu, UND_MODE);
        *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

        /* Branch to exception vector */
        *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
}
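
/*
 * For reference, the ARM vector offsets used by the injection paths:
 * 0x04 undefined instruction (above), 0x0c prefetch abort and 0x10 data
 * abort (inject_abt() below), each relative to exc_vector_base().
 */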

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 */
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
        u32 vect_offset;
        u32 return_offset = (is_pabt) ? 4 : 8;
        bool is_lpae;

        kvm_update_psr(vcpu, ABT_MODE);
        *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

        if (is_pabt)
                vect_offset = 12;
        else
                vect_offset = 16;

        /* Branch to exception vector */
        *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;

        if (is_pabt) {
                /* Set IFAR and IFSR */
                vcpu_cp15(vcpu, c6_IFAR) = addr;
                is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
                /* Always give debug fault for now - should give guest a clue */
                if (is_lpae)
                        vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22;
                else
                        vcpu_cp15(vcpu, c5_IFSR) = 2;
        } else { /* !iabt */
                /* Set DFAR and DFSR */
                vcpu_cp15(vcpu, c6_DFAR) = addr;
                is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
                /* Always give debug fault for now - should give guest a clue */
                if (is_lpae)
                        vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22;
                else
                        vcpu_cp15(vcpu, c5_DFSR) = 2;
        }
}
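
/*
 * On the fault status values above: when TTBCR.EAE (bit 31) is set the
 * guest uses the LPAE long-descriptor FSR format, so bit 9 flags the
 * format and 0x22 encodes a debug event; otherwise the short-descriptor
 * format applies and the debug-event status code is simply 2.
 */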

/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
        inject_abt(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
        inject_abt(vcpu, true, addr);
}

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
        vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VA);
}