hyp-entry.S

/*
 * Copyright (C) 2015-2018 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/mmu.h>

	.text
	.pushsection	.hyp.text, "ax"

.macro do_el2_call
	/*
	 * Shuffle the parameters before calling the function
	 * pointed to in x0. Assumes parameters in x[1,2,3].
	 */
	str	lr, [sp, #-16]!
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr
	ldr	lr, [sp], #16
.endm

ENTRY(__vhe_hyp_call)
	do_el2_call
	/*
	 * We used to rely on having an exception return to get
	 * an implicit isb. In the E2H case, we don't have it anymore.
	 * Rather than changing all the leaf functions, just do it here
	 * before returning to the rest of the kernel.
	 */
	isb
	ret
ENDPROC(__vhe_hyp_call)

el1_sync:				// Guest trapped into EL2
	mrs	x0, esr_el2
	lsr	x0, x0, #ESR_ELx_EC_SHIFT
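	/*
	 * x0 now holds the exception class. The cmp/ccmp pair below acts
	 * as a branch-free OR: if the EC is not HVC64, compare it against
	 * HVC32; otherwise force the Z flag, so b.ne only takes the
	 * el1_trap path when the exception is neither flavour of HVC.
	 */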
	cmp	x0, #ESR_ELx_EC_HVC64
	ccmp	x0, #ESR_ELx_EC_HVC32, #4, ne
	b.ne	el1_trap

	mrs	x1, vttbr_el2		// If vttbr is valid, the guest
	cbnz	x1, el1_hvc_guest	// called HVC

	/* Here, we're pretty sure the host called HVC. */
	ldp	x0, x1, [sp], #16

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	1f

	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there. Since we use kimage_voffset, do not use the
	 * HYP VA for __kvm_handle_stub_hvc, but the kernel VA instead
	 * (by loading it from the constant pool).
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	ldr	x5, =__kvm_handle_stub_hvc
	ldr_l	x6, kimage_voffset

	/* x5 = __pa(x5) */
	sub	x5, x5, x6
	br	x5

1:
	/*
	 * Perform the EL2 call
	 */
	kern_hyp_va	x0
	do_el2_call

	eret

el1_hvc_guest:
	/*
	 * Fastest possible path for ARM_SMCCC_ARCH_WORKAROUND_1.
	 * The workaround has already been applied on the host,
	 * so let's quickly get back to the guest. We don't bother
	 * restoring x1, as it can be clobbered anyway.
	 */
	ldr	x1, [sp]			// Guest's x0
	eor	w1, w1, #ARM_SMCCC_ARCH_WORKAROUND_1
	cbz	w1, wa_epilogue

	/* ARM_SMCCC_ARCH_WORKAROUND_2 handling */
	eor	w1, w1, #(ARM_SMCCC_ARCH_WORKAROUND_1 ^ \
			  ARM_SMCCC_ARCH_WORKAROUND_2)
	cbnz	w1, el1_trap

#ifdef CONFIG_ARM64_SSBD
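	/*
	 * The guest passes its requested SSBD mitigation state in x1.
	 * The code below reduces it to a single bit, records it in the
	 * vcpu workaround flags, and only issues the SMC when this CPU
	 * actually requires the firmware callback.
	 */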
alternative_cb	arm64_enable_wa2_handling
	b	wa2_end
alternative_cb_end
	get_vcpu_ptr	x2, x0
	ldr	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	// Sanitize the argument and update the guest flags
	ldr	x1, [sp, #8]			// Guest's x1
	clz	w1, w1				// Murphy's device:
	lsr	w1, w1, #5			// w1 = !!w1 without using
	eor	w1, w1, #1			// the flags...
	bfi	x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
	str	x0, [x2, #VCPU_WORKAROUND_FLAGS]

	/* Check that we actually need to perform the call */
	hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
	cbz	x0, wa2_end

	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	smc	#0

	/* Don't leak data from the SMC call */
	mov	x3, xzr
wa2_end:
	mov	x2, xzr
	mov	x1, xzr
#endif

wa_epilogue:
	mov	x0, xzr
	add	sp, sp, #16
	eret
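
	/*
	 * Common guest-exit paths: load the vcpu pointer into x1, set
	 * the exit code in x0 and hand over to __guest_exit.
	 */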
el1_trap:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_TRAP
	b	__guest_exit

el1_irq:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_IRQ
	b	__guest_exit

el1_error:
	get_vcpu_ptr	x1, x0
	mov	x0, #ARM_EXCEPTION_EL1_SERROR
	b	__guest_exit

el2_error:
	ldp	x0, x1, [sp], #16

	/*
	 * Only two possibilities:
	 * 1) Either we come from the exit path, having just unmasked
	 *    PSTATE.A: change the return code to an EL2 fault, and
	 *    carry on, as we're already in a sane state to handle it.
	 * 2) Or we come from anywhere else, and that's a bug: we panic.
	 *
	 * For (1), x0 contains the original return code and x1 doesn't
	 * contain anything meaningful at that stage. We can reuse them
	 * as temp registers.
	 * For (2), who cares?
	 */
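	/*
	 * The cmp/ccmp pair below accepts ELR_EL2 if it equals either
	 * abort_guest_exit_start or abort_guest_exit_end, and panics
	 * for any other address.
	 */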
	mrs	x0, elr_el2
	adr	x1, abort_guest_exit_start
	cmp	x0, x1
	adr	x1, abort_guest_exit_end
	ccmp	x0, x1, #4, ne
	b.ne	__hyp_panic
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
	eret
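
	/*
	 * Fake an exception return to EL1h: mask all of DAIF in the saved
	 * PSTATE, point ELR_EL2 at the kernel's panic() and eret into it.
	 */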
ENTRY(__hyp_do_panic)
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret
ENDPROC(__hyp_do_panic)

ENTRY(__hyp_panic)
	get_host_ctxt x0, x1
	b	hyp_panic
ENDPROC(__hyp_panic)

.macro invalid_vector	label, target = __hyp_panic
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid
	invalid_vector	el2t_irq_invalid
	invalid_vector	el2t_fiq_invalid
	invalid_vector	el2t_error_invalid
	invalid_vector	el2h_sync_invalid
	invalid_vector	el2h_irq_invalid
	invalid_vector	el2h_fiq_invalid
	invalid_vector	el1_fiq_invalid

	.ltorg

	.align 11
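
	/*
	 * Each vector entry below is padded to 128 bytes (.align 7).
	 * Valid entries stash the guest's x0/x1 on the stack before
	 * branching to the handler. Invalid entries carry a second
	 * branch preceded by an ldp: when the indirect (hardened)
	 * vectors are in use, the trampoline has already pushed x0/x1
	 * and lands 4 bytes into the slot, so the push is undone
	 * before heading to the panic path.
	 */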
.macro valid_vect target
	.align 7
	stp	x0, x1, [sp, #-16]!
	b	\target
.endm

.macro invalid_vect target
	.align 7
	b	\target
	ldp	x0, x1, [sp], #16
	b	\target
.endm

ENTRY(__kvm_hyp_vector)
	invalid_vect	el2t_sync_invalid	// Synchronous EL2t
	invalid_vect	el2t_irq_invalid	// IRQ EL2t
	invalid_vect	el2t_fiq_invalid	// FIQ EL2t
	invalid_vect	el2t_error_invalid	// Error EL2t

	invalid_vect	el2h_sync_invalid	// Synchronous EL2h
	invalid_vect	el2h_irq_invalid	// IRQ EL2h
	invalid_vect	el2h_fiq_invalid	// FIQ EL2h
	valid_vect	el2_error		// Error EL2h

	valid_vect	el1_sync		// Synchronous 64-bit EL1
	valid_vect	el1_irq			// IRQ 64-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 64-bit EL1
	valid_vect	el1_error		// Error 64-bit EL1

	valid_vect	el1_sync		// Synchronous 32-bit EL1
	valid_vect	el1_irq			// IRQ 32-bit EL1
	invalid_vect	el1_fiq_invalid		// FIQ 32-bit EL1
	valid_vect	el1_error		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

#ifdef CONFIG_KVM_INDIRECT_VECTORS
.macro hyp_ventry
	.align 7
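	/*
	 * 27 NOPs plus the five instruction slots in the alternative
	 * below add up to 32 instructions, i.e. one 128-byte vector.
	 */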
1:	.rept 27
	nop
	.endr
/*
 * The default sequence is to directly branch to the KVM vectors,
 * using the computed offset. This applies for VHE as well as
 * !ARM64_HARDEN_EL2_VECTORS.
 *
 * For ARM64_HARDEN_EL2_VECTORS configurations, this gets replaced
 * with:
 *
 * stp	x0, x1, [sp, #-16]!
 * movz	x0, #(addr & 0xffff)
 * movk	x0, #((addr >> 16) & 0xffff), lsl #16
 * movk	x0, #((addr >> 32) & 0xffff), lsl #32
 * br	x0
 *
 * Where addr = kern_hyp_va(__kvm_hyp_vector) + vector-offset + 4.
 * See kvm_patch_vector_branch for details.
 */
alternative_cb	kvm_patch_vector_branch
	b	__kvm_hyp_vector + (1b - 0b)
	nop
	nop
	nop
	nop
alternative_cb_end
.endm

.macro generate_vectors
0:
	.rept 16
	hyp_ventry
	.endr
	.org 0b + SZ_2K		// Safety measure
.endm
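
	/*
	 * BP_HARDEN_EL2_SLOTS copies of the vectors, one 2KB slot each,
	 * live between __bp_harden_hyp_vecs_start and
	 * __bp_harden_hyp_vecs_end.
	 */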
	.align	11
ENTRY(__bp_harden_hyp_vecs_start)
	.rept BP_HARDEN_EL2_SLOTS
	generate_vectors
	.endr
ENTRY(__bp_harden_hyp_vecs_end)

	.popsection
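
	/*
	 * Invoke SMCCC_ARCH_WORKAROUND_1, preserving x0-x3 around the SMC.
	 * The _start/_end labels bracket the sequence so that it can be
	 * copied verbatim and used as a branch-predictor hardening
	 * callback; note that it is deliberately not terminated by a ret.
	 */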
ENTRY(__smccc_workaround_1_smc_start)
	sub	sp, sp, #(8 * 4)
	stp	x2, x3, [sp, #(8 * 0)]
	stp	x0, x1, [sp, #(8 * 2)]
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_1
	smc	#0
	ldp	x2, x3, [sp, #(8 * 0)]
	ldp	x0, x1, [sp, #(8 * 2)]
	add	sp, sp, #(8 * 4)
ENTRY(__smccc_workaround_1_smc_end)
#endif