/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

#include <linux/linkage.h>

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with a single "and" operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	FRAME_BEGIN

	/* Unmask events */
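	/*
	 * evtchn_upcall_mask is a single byte; writing zero re-enables
	 * event delivery for this vcpu.
	 */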
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preemption here doesn't matter because it will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
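	/* testb with $0xff sets ZF only if the whole pending byte is zero */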
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

	call check_events
1:
	FRAME_END
	ret
ENDPROC(xen_irq_enable_direct)

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
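	/*
	 * A nonzero mask byte blocks delivery; events raised while
	 * masked stay pending and are picked up when the mask clears.
	 */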
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	ret
ENDPROC(xen_irq_disable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
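	/*
	 * testb sets ZF when the mask byte is zero (events enabled);
	 * setz then makes %ah 1 in that case and addb doubles it to 2.
	 * As %ah is the high byte of %ax, the low word of the return
	 * value becomes 0x200 == X86_EFLAGS_IF exactly when enabled.
	 */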
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
	ret
ENDPROC(xen_save_fl_direct)

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
	FRAME_BEGIN
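	/*
	 * The flags argument is in %rdi on 64-bit; the 32-bit test on
	 * %ah implies it arrives in %eax there.  X86_EFLAGS_IF is bit 9,
	 * i.e. bit 1 of the high byte, hence the >>8 shift below.
	 */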
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
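	/*
	 * ZF is set iff IF was clear, so setz writes 1 (masked) into
	 * the mask byte exactly when interrupts are being disabled.
	 */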
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preemption here doesn't matter because it will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* check for unmasked and pending */
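	/*
	 * evtchn_upcall_pending and evtchn_upcall_mask are adjacent
	 * bytes, so one 16-bit compare against 0x0001 checks for
	 * pending == 1 and mask == 0 in a single operation.
	 */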
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f

	call check_events
1:
	FRAME_END
	ret
ENDPROC(xen_restore_fl_direct)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
ENTRY(check_events)
	FRAME_BEGIN
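	/*
	 * Callers of the direct stubs above expect their registers to
	 * be untouched, so every register the C call could clobber is
	 * saved and restored by hand around it.
	 */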
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
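	/*
	 * On 64-bit the SysV call-clobbered registers are rax, rcx,
	 * rdx, rsi, rdi and r8-r11.
	 */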
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	FRAME_END
	ret
ENDPROC(check_events)