xen-asm_64.S

/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in pda) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */
#include <asm/errno.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

#include <xen/interface/xen.h>

#include "xen-asm.h"
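
/*
 * Note: when Xen delivers an exception it saves the two registers it
 * clobbers on guest entry (%rcx and %r11) on top of the hardware
 * frame.  The 8+ in the offsets below skips the return address pushed
 * by the call into this helper; "ret $16" then returns while
 * discarding those two extra words, leaving a standard iret frame for
 * the common exception entry code.
 */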
ENTRY(xen_adjust_exception_frame)
	mov 8+0(%rsp), %rcx
	mov 8+8(%rsp), %r11
	ret $16
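
/*
 * Every entry in the shared hypercall page is 32 bytes, so the stub
 * for a given hypercall can be addressed directly by number; this
 * gives the address of the iret hypercall's entry.
 */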
hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags
 *
 *	rcx		}
 *	r11		}<-- pushed by hypercall page
 * rsp->rax		}
 */
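/*
 * xen_iret itself only has to supply the "flags" word of the frame
 * above (pushq $0: no VGCF flags set) before jumping to the iret
 * hypercall stub.  ENDPATCH() and RELOC() (from xen-asm.h) record the
 * end of this sequence and the location of the jmp's displacement
 * (1b+1, the byte after the opcode) so the paravirt patching code can
 * inline it and fix the jump up at its new location.
 */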
ENTRY(xen_iret)
	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_iret)
RELOC(xen_iret, 1b+1)
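
/*
 * Pv replacement for sysret: %rcx holds the user %rip and %r11 the
 * user %rflags (the usual syscall/sysret convention), so the pushes
 * below rebuild a full Xen iret frame (ss, rsp, rflags, cs, rip and a
 * flags word with VGCF_in_syscall set) and hand it to the iret
 * hypercall.
 */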
ENTRY(xen_sysret64)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back
	 */
	movq %rsp, PER_CPU_VAR(rsp_scratch)
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	pushq $__USER_DS
	pushq PER_CPU_VAR(rsp_scratch)
	pushq %r11
	pushq $__USER_CS
	pushq %rcx

	pushq $VGCF_in_syscall
1:	jmp hypercall_iret
ENDPATCH(xen_sysret64)
RELOC(xen_sysret64, 1b+1)
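
/*
 * Same as xen_sysret64, but for returning to 32-bit userspace: 32-bit
 * user segments and a zero flags word instead of VGCF_in_syscall.
 */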
ENTRY(xen_sysret32)
	/*
	 * We're already on the usermode stack at this point, but
	 * still with the kernel gs, so we can easily switch back
	 */
	movq %rsp, PER_CPU_VAR(rsp_scratch)
	movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp

	pushq $__USER32_DS
	pushq PER_CPU_VAR(rsp_scratch)
	pushq %r11
	pushq $__USER32_CS
	pushq %rcx

	pushq $0
1:	jmp hypercall_iret
ENDPATCH(xen_sysret32)
RELOC(xen_sysret32, 1b+1)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 *
 * In all the entrypoints, we undo all that to make it look like a
 * CPU-generated syscall/sysenter and jump to the normal entrypoint.
 */
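
/*
 * With the frame above, %rcx is at 0*8(%rsp), %r11 at 1*8(%rsp) and
 * the saved user %rsp at 5*8(%rsp).  Restoring those three (the last
 * mov switches back to the user stack) makes the register state look
 * like a native syscall/sysenter, ready for the normal entry code.
 */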
.macro undo_xen_syscall
	mov 0*8(%rsp), %rcx
	mov 1*8(%rsp), %r11
	mov 5*8(%rsp), %rsp
.endm

/* Normal 64-bit system call target */
ENTRY(xen_syscall_target)
	undo_xen_syscall
	jmp entry_SYSCALL_64_after_swapgs
ENDPROC(xen_syscall_target)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
ENTRY(xen_syscall32_target)
	undo_xen_syscall
	jmp entry_SYSCALL_compat
ENDPROC(xen_syscall32_target)

/* 32-bit compat sysenter target */
ENTRY(xen_sysenter_target)
	undo_xen_syscall
	jmp entry_SYSENTER_compat
ENDPROC(xen_sysenter_target)

#else /* !CONFIG_IA32_EMULATION */
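
/*
 * Without compat syscall support there is nothing to forward to, so
 * these stubs drop the %rcx/%r11 words from the frame, set a -ENOSYS
 * return value and go straight back to userspace via the iret
 * hypercall (pushq $0 supplies the flags word, as in xen_iret).
 */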
ENTRY(xen_syscall32_target)
ENTRY(xen_sysenter_target)
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	pushq $0
	jmp hypercall_iret

ENDPROC(xen_syscall32_target)
ENDPROC(xen_sysenter_target)

#endif /* CONFIG_IA32_EMULATION */