emulate.c

/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5

#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])
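
/*
 * For illustration: REG_OFFSET() yields the index of a struct kvm_regs
 * field when the register context is viewed as a flat array of 32-bit
 * words.  Assuming usr_regs.uregs[] is the first member of struct
 * kvm_regs (as in the 32-bit ARM KVM ABI), this gives, e.g.:
 *
 *	USR_REG_OFFSET(0)  == 0		-> r0
 *	USR_REG_OFFSET(14) == 14	-> r14 (lr)
 */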

static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};
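
/*
 * Note on the layout above: on ARM, FIQ mode banks r8-r14, while IRQ,
 * SVC, ABT and UND bank only r13 (sp) and r14 (lr); SYS mode shares the
 * USR register file.  Each row therefore redirects exactly the banked
 * subset of r0-r14 into the mode-specific fields of struct kvm_regs.
 */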

/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case USR_MODE...SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}
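
/*
 * Note: the USR_MODE...SVC_MODE case range above covers USR (0x10),
 * FIQ (0x11), IRQ (0x12) and SVC (0x13); clearing MODE32_BIT maps these
 * directly onto VCPU_REG_OFFSET_USR..VCPU_REG_OFFSET_SVC.
 *
 * A hypothetical caller, sketched for illustration only: completing a
 * trapped MMIO read by writing the loaded value into the destination
 * register rt (as decoded from the fault syndrome):
 *
 *	*vcpu_reg(vcpu, rt) = data;
 *
 * The returned pointer names the banked copy for the mode at lookup
 * time, so it must not be cached across changes to the CPSR mode bits.
 */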

/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
unsigned long *__vcpu_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}
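
/*
 * Illustrative sketch (not taken from this file): exception injection
 * code typically switches the CPSR mode bits first and then saves the
 * pre-exception CPSR into the new mode's SPSR, roughly:
 *
 *	unsigned long cpsr = *vcpu_cpsr(vcpu);
 *	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
 *	*__vcpu_spsr(vcpu) = cpsr;
 *
 * USR and SYS mode have no SPSR, hence the BUG() above.
 */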

/******************************************************************************
 * Inject exceptions into the guest
 */

/**
 * kvm_inject_vabt - inject an async abort / SError into the guest
 * @vcpu: The VCPU to receive the exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_vabt(struct kvm_vcpu *vcpu)
{
	*vcpu_hcr(vcpu) |= HCR_VA;
}
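
/*
 * Setting the VA bit in the Hyp Configuration Register pends a virtual
 * abort for the guest: the hardware delivers it the next time the guest
 * runs with asynchronous aborts unmasked (CPSR.A clear).  Unlike for
 * synchronous exceptions, no guest register state is rewritten here.
 */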