aarch32.c

/*
 * (not much of an) Emulation layer for 32bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
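
/*
 * Synchronous external abort fault status codes, in the long-descriptor
 * (LPAE) and short-descriptor (nLPAE) DFSR encodings; DFSR_LPAE is the
 * bit that flags the long-descriptor format.
 */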
#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		BIT(9)

/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 */
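/*
 * Rows are indexed by (vector offset / 4); column 0 gives the LR offset
 * when the exception is taken from ARM state, column 1 from Thumb state.
 */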
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};

/*
 * When an exception is taken, most CPSR fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]).
 *
 * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
 * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
 * obsoleted by the ARMv7 virtualization extensions and is RES0.
 *
 * For the SPSR layout seen from AArch32, see:
 * - ARM DDI 0406C.d, page B1-1148
 * - ARM DDI 0487E.a, page G8-6264
 *
 * For the SPSR_ELx layout for AArch32 seen from AArch64, see:
 * - ARM DDI 0487E.a, page C5-426
 *
 * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
 * MSB to LSB.
 */
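/*
 * Build the CPSR value a 32-bit guest observes after taking an exception
 * to 'mode', starting from its current CPSR and honouring the relevant
 * SCTLR controls (DSSBS, SPAN, EE, TE).
 */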
static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
{
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
	unsigned long old, new;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_AA32_N_BIT);
	new |= (old & PSR_AA32_Z_BIT);
	new |= (old & PSR_AA32_C_BIT);
	new |= (old & PSR_AA32_V_BIT);
	new |= (old & PSR_AA32_Q_BIT);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See ARM DDI 0487E.a, section G1.12.3
	// See ARM DDI 0406C.d, section B1.8.3

	new |= (old & PSR_AA32_DIT_BIT);

	// CPSR.SSBS is set to SCTLR.DSSBS upon any exception
	// See ARM DDI 0487E.a, page G8-6244
	if (sctlr & BIT(31))
		new |= PSR_AA32_SSBS_BIT;

	// CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
	// SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page G8-6246
	new |= (old & PSR_AA32_PAN_BIT);
	if (!(sctlr & BIT(23)))
		new |= PSR_AA32_PAN_BIT;

	// SS does not exist in AArch32, so ignore

	// CPSR.IL is set to zero upon any exception
	// See ARM DDI 0487E.a, page G1-5527

	new |= (old & PSR_AA32_GE_MASK);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See prior comment above

	// CPSR.E is set to SCTLR.EE upon any exception
	// See ARM DDI 0487E.a, page G8-6245
	// See ARM DDI 0406C.d, page B4-1701
	if (sctlr & BIT(25))
		new |= PSR_AA32_E_BIT;

	// CPSR.A is unchanged upon an exception to Undefined, Supervisor
	// CPSR.A is set upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_A_BIT);
	if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
		new |= PSR_AA32_A_BIT;

	// CPSR.I is set upon any exception
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= PSR_AA32_I_BIT;

	// CPSR.F is set upon an exception to FIQ
	// CPSR.F is unchanged upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_F_BIT);
	if (mode == PSR_AA32_MODE_FIQ)
		new |= PSR_AA32_F_BIT;

	// CPSR.T is set to SCTLR.TE upon any exception
	// See ARM DDI 0487E.a, page G8-5514
	// See ARM DDI 0406C.d, page B1-1181
	if (sctlr & BIT(30))
		new |= PSR_AA32_T_BIT;

	new |= mode;

	return new;
}

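/*
 * Emulate 32-bit exception entry: switch the guest's CPSR to the target
 * mode, save the old CPSR into the banked SPSR, set LR to the preferred
 * return address and branch to the exception vector.
 */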
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long spsr = *vcpu_cpsr(vcpu);
	bool is_thumb = (spsr & PSR_AA32_T_BIT);
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);

	/* Note: These now point to the banked copies */
	vcpu_write_spsr(vcpu, host_spsr_to_spsr32(spsr));
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
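	/*
	 * SCTLR.V (bit 13) selects the Hivecs vector base at 0xffff0000;
	 * otherwise the vectors sit at the address held in VBAR.
	 */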
	if (sctlr & (1 << 13))
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}

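/*
 * Inject an Undefined Instruction exception into a 32-bit guest
 * (Undef vector, offset 4 from the vector base).
 */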
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
	prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
}

/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
 * pseudocode.
 */
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !iabt */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, PSR_AA32_MODE_ABT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
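	/*
	 * TTBCR.EAE (bit 31) tells us whether the guest is using the
	 * long-descriptor translation format, and hence which FSR layout
	 * (LPAE or short-descriptor) it expects to see.
	 */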
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
	if (is_lpae) {
		*fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		*fsr = DFSR_FSC_EXTABT_nLPAE;
	}
}

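/* Inject a Data Abort for the given address into a 32-bit guest */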
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, false, addr);
}

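/* Inject a Prefetch Abort for the given address into a 32-bit guest */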
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, true, addr);
}
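
/*
 * Usage sketch (not part of this file): these helpers are expected to be
 * reached from the common AArch64 fault-injection path when the guest's
 * EL1 is AArch32. The caller below is an assumption modelled on
 * arch/arm64/kvm/inject_fault.c, not code from this file:
 *
 *	void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 *	{
 *		if (vcpu_el1_is_32bit(vcpu))
 *			kvm_inject_dabt32(vcpu, addr);
 *		else
 *			inject_abt64(vcpu, false, addr);
 *	}
 */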