entry.S

/*
 * Copyright (C) 2016 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>

	.arch_extension	virt

	.text
	.pushsection	.hyp.text, "ax"
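
/*
 * Offset of the usr-mode GP register file within struct kvm_cpu_context;
 * the CPU_CTXT_GP_REGS and GP_REGS_USR constants come from asm-offsets.
 */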
#define USR_REGS_OFFSET		(CPU_CTXT_GP_REGS + GP_REGS_USR)

/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
ENTRY(__guest_enter)
	@ Save host registers
	add	r1, r1, #(USR_REGS_OFFSET + S_R4)
	stm	r1!, {r4-r12}
	str	lr, [r1, #4]	@ Skip SP_usr (already saved)

	@ Restore guest registers
	add	r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
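	/*
	 * Load the guest's lr before the ldm below: r0 is in the
	 * transfer list, so the load replaces the context pointer
	 * with the guest's r0. lr cannot be folded into the ldm
	 * because SP_usr sits between r12 and lr in struct pt_regs.
	 */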
	ldr	lr, [r0, #S_LR]
	ldm	r0, {r0-r12}

	clrex
	eret
ENDPROC(__guest_enter)

ENTRY(__guest_exit)
	/*
	 * return convention:
	 * guest r0, r1, r2 saved on the stack
	 * r0: vcpu pointer
	 * r1: exception code
	 */

	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
	stm	r2!, {r3-r12}
	str	lr, [r2, #4]		@ Skip SP_usr (saved elsewhere)
	add	r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
	pop	{r3, r4, r5}		@ r0, r1, r2
	stm	r2, {r3-r5}

	ldr	r0, [r0, #VCPU_HOST_CTXT]
	add	r0, r0, #(USR_REGS_OFFSET + S_R4)
	ldm	r0!, {r4-r12}
	ldr	lr, [r0, #4]		@ Skip SP_usr (restored elsewhere)
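
	/*
	 * Snapshot SPSR, ELR_hyp and HSR now: if the abort window
	 * below does take an abort, the exception overwrites them,
	 * and the msrmi/mcrmi sequence further down puts these
	 * copies back.
	 */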
	mov	r0, r1
	mrs	r1, SPSR
	mrs	r2, ELR_hyp
	mrc	p15, 4, r3, c5, c2, 0	@ HSR

	/*
	 * Force loads and stores to complete before unmasking aborts
	 * and forcing the delivery of the exception. This gives us a
	 * single instruction window, which the handler will try to
	 * match.
	 */
	dsb	sy
	cpsie	a

	.global	abort_guest_exit_start
abort_guest_exit_start:

	isb

	.global	abort_guest_exit_end
abort_guest_exit_end:
	/*
	 * If we took an abort, r0[31] will be set, and cmp will set
	 * the N bit in PSTATE.
	 */
	cmp	r0, #0
	msrmi	SPSR_cxsf, r1
	msrmi	ELR_hyp, r2
	mcrmi	p15, 4, r3, c5, c2, 0	@ HSR
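
	/*
	 * lr now holds the host return address saved by __guest_enter,
	 * so this returns to __guest_enter's caller with the exception
	 * code in r0.
	 */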
	bx	lr
ENDPROC(__guest_exit)

/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however cp10 and cp11 accesses will still trap and fall back
 * to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 */
#ifdef CONFIG_VFPv3
ENTRY(__vfp_guest_restore)
	push	{r3, r4, lr}

	@ NEON/VFP used. Turn on VFP access.
	mrc	p15, 4, r1, c1, c1, 2		@ HCPTR
	bic	r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
	mcr	p15, 4, r1, c1, c1, 2		@ HCPTR
	isb

	@ Switch VFP/NEON hardware state to the guest's
	mov	r4, r0
	ldr	r0, [r0, #VCPU_HOST_CTXT]
	add	r0, r0, #CPU_CTXT_VFP
	bl	__vfp_save_state
	add	r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
	bl	__vfp_restore_state

	pop	{r3, r4, lr}
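	@ Restore the guest's r0-r2 (pushed at HYP entry) and return
	@ straight back to the guest.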
	pop	{r0, r1, r2}
	clrex
	eret
ENDPROC(__vfp_guest_restore)
#endif

	.popsection