xen-asm.S

/*
 * Asm versions of Xen pv-ops, suitable for either direct use or
 * inlining.  The inline versions are the same as the direct-use
 * versions, with the pre- and post-amble chopped off.
 *
 * This code is encoded for size rather than absolute efficiency, with
 * a view to being able to inline as much as possible.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C, since
 * they're generally too large to inline anyway.
 */
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/frame.h>

#include "xen-asm.h"
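
/*
 * ENDPATCH(x) marks the end of the text that may be copied inline by
 * the paravirt patcher (everything from ENTRY(x) up to that point),
 * and RELOC(x, v) records the location (e.g. 2b+1, the byte after a
 * call opcode) of a displacement that must be fixed up if the code is
 * relocated; 0 means there is nothing to fix up.  See xen-asm.h for
 * the definitions.
 */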

/*
 * Enable events.  This clears the event mask and tests the pending
 * event status with a single and operation.  If there are pending
 * events, then enter the hypervisor to get them handled.
 */
ENTRY(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jz 1f

2:	call check_events
1:
ENDPATCH(xen_irq_enable_direct)
	FRAME_END
	ret
	ENDPROC(xen_irq_enable_direct)
	RELOC(xen_irq_enable_direct, 2b+1)
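
/*
 * Roughly equivalent C, as an illustrative sketch only (vcpu_info
 * here stands for this CPU's struct vcpu_info, which the asm reaches
 * via PER_CPU_VAR(xen_vcpu_info)):
 *
 *	vcpu_info->evtchn_upcall_mask = 0;
 *	barrier();
 *	if (vcpu_info->evtchn_upcall_pending)
 *		xen_force_evtchn_callback();
 */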

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
ENTRY(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
ENDPATCH(xen_irq_disable_direct)
	ret
	ENDPROC(xen_irq_disable_direct)
	RELOC(xen_irq_disable_direct, 0)
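
/*
 * As a C-level sketch this is just vcpu_info->evtchn_upcall_mask = 1;
 * no pending check is needed, since masking events cannot make one
 * deliverable.
 */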

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
ENTRY(xen_save_fl_direct)
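	/*
	 * The mask byte is 0 when events are enabled, so testb sets ZF
	 * in that case; setz then makes %ah 1 and doubling it gives 2.
	 * Since %ah is bits 8-15 of the return register, bit 9, which
	 * is X86_EFLAGS_IF (0x200), ends up set exactly when events
	 * are enabled.
	 */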
	testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask
	setz %ah
	addb %ah, %ah
ENDPATCH(xen_save_fl_direct)
	ret
	ENDPROC(xen_save_fl_direct)
	RELOC(xen_save_fl_direct, 0)
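
/*
 * Roughly equivalent C, as an illustrative sketch only:
 *
 *	return vcpu_info->evtchn_upcall_mask ? 0 : X86_EFLAGS_IF;
 *
 * with the caveat noted above that bits outside that byte of the real
 * return value are undefined.
 */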

/*
 * In principle the caller should be passing us a value returned from
 * xen_save_fl_direct, but for robustness' sake we test only the
 * X86_EFLAGS_IF flag rather than the whole byte.  After setting the
 * interrupt mask state, it checks for unmasked pending events and
 * enters the hypervisor to get them delivered if so.
 */
ENTRY(xen_restore_fl_direct)
	FRAME_BEGIN
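	/*
	 * The flags argument arrives in %rdi on 64-bit (first argument
	 * register) and in %eax on 32-bit (the kernel is built with
	 * -mregparm=3), which is why the test below looks at %di or at
	 * %ah (IF is bit 9, hence X86_EFLAGS_IF >> 8 within the high
	 * byte).
	 */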
#ifdef CONFIG_X86_64
	testw $X86_EFLAGS_IF, %di
#else
	testb $X86_EFLAGS_IF>>8, %ah
#endif
	setz PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask

	/*
	 * Preempt here doesn't matter because that will deal with any
	 * pending interrupts.  The pending check may end up being run
	 * on the wrong CPU, but that doesn't hurt.
	 */

	/*
	 * Check for unmasked and pending.  The 16-bit access covers
	 * both evtchn_upcall_pending (low byte) and evtchn_upcall_mask
	 * (the next byte), so 0x0001 means pending and not masked.
	 */
	cmpw $0x0001, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending
	jnz 1f

2:	call check_events
1:
ENDPATCH(xen_restore_fl_direct)
	FRAME_END
	ret
	ENDPROC(xen_restore_fl_direct)
	RELOC(xen_restore_fl_direct, 2b+1)
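
/*
 * Roughly equivalent C, as an illustrative sketch only (flags being
 * the value handed back by xen_save_fl_direct):
 *
 *	vcpu_info->evtchn_upcall_mask = (flags & X86_EFLAGS_IF) ? 0 : 1;
 *	barrier();
 *	if (vcpu_info->evtchn_upcall_pending &&
 *	    !vcpu_info->evtchn_upcall_mask)
 *		xen_force_evtchn_callback();
 */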

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
ENTRY(check_events)
	FRAME_BEGIN
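	/*
	 * The *_direct stubs above are used (and may be patched inline)
	 * at sites that expect nothing beyond the return register to be
	 * clobbered, so every call-clobbered register is preserved
	 * around the call into C.
	 */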
#ifdef CONFIG_X86_32
	push %eax
	push %ecx
	push %edx
	call xen_force_evtchn_callback
	pop %edx
	pop %ecx
	pop %eax
#else
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
#endif
	FRAME_END
	ret
	ENDPROC(check_events)