/* kvm_minstate.h */
/*
 * kvm_minstate.h: min save macros
 * Copyright (c) 2007, Intel Corporation.
 *
 * Xuefei Xu (Anthony Xu) (Anthony.xu@intel.com)
 * Xiantao Zhang (xiantao.zhang@intel.com)
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#include <asm/asmmacro.h>
#include <asm/types.h>
#include <asm/kregs.h>
#include <asm/kvm_host.h>

#include "asm-offsets.h"

/*
 * Switch the RSE to the per-vcpu kernel register backing store and point
 * r1 at the kernel memory-stack save area.  The RSE is first put into
 * enforced-lazy mode (ar.rsc = 0) so that ar.rnat can be read coherently,
 * then switched to eager mode (ar.rsc = 3) on the new backing store.
 *
 * On exit:
 *	r18 = ar.bsp on the kernel RBS (used later to compute ndirty)
 *	r22 = base of the kernel RBS (old r1 + VMM_RBS_OFFSET)
 *	r23 = saved user ar.bspstore
 *	r28 = saved ar.rnat (captured while RSE was quiesced)
 *	r1  = old r1 + KVM_STK_OFFSET - VMM_PT_REGS_SIZE (pt_regs save area)
 */
#define KVM_MINSTATE_START_SAVE_MIN					\
	mov ar.rsc = 0;	/* set enforced lazy mode, pl 0, little-endian, loadrs=0 */\
	;;								\
	mov.m r28 = ar.rnat;						\
	addl r22 = VMM_RBS_OFFSET,r1;	/* compute base of RBS */	\
	;;								\
	lfetch.fault.excl.nt1 [r22];	/* warm up the new RBS line */	\
	addl r1 = KVM_STK_OFFSET-VMM_PT_REGS_SIZE, r1;			\
	mov r23 = ar.bspstore;		/* save ar.bspstore */		\
	;;								\
	mov ar.bspstore = r22;		/* switch to kernel RBS */	\
	;;								\
	mov r18 = ar.bsp;						\
	mov ar.rsc = 0x3;  /* set eager mode, pl 0, little-endian, loadrs=0 */
/*
 * Finish a SAVE_MIN sequence: switch back to register bank 1.
 * bsw.1 must be the last instruction of its instruction group
 * (hence the stop immediately after it).
 */
#define KVM_MINSTATE_END_SAVE_MIN					\
	bsw.1;		/* switch back to bank 1 (must be last in insn group) */\
	;;
  44. #define PAL_VSA_SYNC_READ \
  45. /* begin to call pal vps sync_read */ \
  46. {.mii; \
  47. add r25 = VMM_VPD_BASE_OFFSET, r21; \
  48. nop 0x0; \
  49. mov r24=ip; \
  50. ;; \
  51. } \
  52. {.mmb \
  53. add r24=0x20, r24; \
  54. ld8 r25 = [r25]; /* read vpd base */ \
  55. br.cond.sptk kvm_vps_sync_read; /*call the service*/ \
  56. ;; \
  57. }; \
  58. #define KVM_MINSTATE_GET_CURRENT(reg) mov reg=r21
/*
 * KVM_DO_SAVE_MIN switches to the kernel stacks (if necessary) and saves
 * the minimum state necessary that allows us to turn psr.ic back
 * on.
 *
 * Assumed state upon entry:
 *	psr.ic: off
 *	r31:	contains saved predicates (pr)
 *
 * Upon exit, the state is as follows:
 *	psr.ic: off
 *	 r2 = points to &pt_regs.r16
 *	 r8 = contents of ar.ccv
 *	 r9 = contents of ar.csd
 *	r10 = contents of ar.ssd
 *	r11 = FPSR_DEFAULT
 *	r12 = kernel sp (kernel virtual address)
 *	r13 = points to current task_struct (kernel virtual address)
 *	p15 = TRUE if psr.i is set in cr.ipsr
 *	predicate registers (other than p2, p3, and p15), b6, r3, r14, r15:
 *		preserved
 *
 * Note that psr.ic is NOT turned on by this macro.  This is so that
 * we can pass interruption state as arguments to a handler.
 */
  84. #define PT(f) (VMM_PT_REGS_##f##_OFFSET)
/*
 * See the block comment above for the full entry/exit contract.
 * Parameters (see the KVM_SAVE_MIN* wrappers at the bottom of the file):
 *	COVER:    `cover' instruction, or nothing
 *	SAVE_IFS: instruction that leaves cr.ifs (or 0) in r30
 *	EXTRA:    optional extra instruction(s), run late in the sequence
 */
#define KVM_DO_SAVE_MIN(COVER,SAVE_IFS,EXTRA)				\
	KVM_MINSTATE_GET_CURRENT(r16);	/* M (or M;;I) */		\
	mov r27 = ar.rsc;		/* M */				\
	mov r20 = r1;			/* A */				\
	mov r25 = ar.unat;		/* M */				\
	mov r29 = cr.ipsr;		/* M */				\
	mov r26 = ar.pfs;		/* I */				\
	mov r18 = cr.isr;						\
	COVER;				/* B;; (or nothing) */		\
	;;								\
	tbit.z p0,p15 = r29,IA64_PSR_I_BIT;				\
	mov r1 = r16;							\
/*	mov r21=r16;	*/						\
	/* switch from user to kernel RBS: */				\
	;;								\
	invala;				/* M */				\
	SAVE_IFS;							\
	;;								\
	KVM_MINSTATE_START_SAVE_MIN					\
	adds r17 = 2*L1_CACHE_BYTES,r1;	/* cache-line size */		\
	adds r16 = PT(CR_IPSR),r1;					\
	;;								\
	lfetch.fault.excl.nt1 [r17],L1_CACHE_BYTES;			\
	st8 [r16] = r29;		/* save cr.ipsr */		\
	;;								\
	lfetch.fault.excl.nt1 [r17];					\
	tbit.nz p15,p0 = r29,IA64_PSR_I_BIT;				\
	mov r29 = b0							\
	;;								\
	adds r16 = PT(R8),r1;	/* initialize first base pointer */	\
	adds r17 = PT(R9),r1;	/* initialize second base pointer */	\
	;;								\
	.mem.offset 0,0; st8.spill [r16] = r8,16;			\
	.mem.offset 8,0; st8.spill [r17] = r9,16;			\
	;;								\
	.mem.offset 0,0; st8.spill [r16] = r10,24;			\
	.mem.offset 8,0; st8.spill [r17] = r11,24;			\
	;;								\
	mov r9 = cr.iip;		/* M */				\
	mov r10 = ar.fpsr;		/* M */				\
	;;								\
	st8 [r16] = r9,16;	/* save cr.iip */			\
	st8 [r17] = r30,16;	/* save cr.ifs (set up by SAVE_IFS) */	\
	sub r18 = r18,r22;	/* r18 = RSE.ndirty*8 */		\
	;;								\
	st8 [r16] = r25,16;	/* save ar.unat */			\
	st8 [r17] = r26,16;	/* save ar.pfs */			\
	shl r18 = r18,16;	/* calu ar.rsc used for "loadrs" */	\
	;;								\
	st8 [r16] = r27,16;	/* save ar.rsc */			\
	st8 [r17] = r28,16;	/* save ar.rnat */			\
	;;			/* avoid RAW on r16 & r17 */		\
	st8 [r16] = r23,16;	/* save ar.bspstore */			\
	st8 [r17] = r31,16;	/* save predicates */			\
	;;								\
	st8 [r16] = r29,16;	/* save b0 */				\
	st8 [r17] = r18,16;	/* save ar.rsc value for "loadrs" */	\
	;;								\
	.mem.offset 0,0; st8.spill [r16] = r20,16;/* save original r1 */ \
	.mem.offset 8,0; st8.spill [r17] = r12,16;			\
	adds r12 = -16,r1;	/* switch to kernel memory stack */	\
	;;								\
	.mem.offset 0,0; st8.spill [r16] = r13,16;			\
	.mem.offset 8,0; st8.spill [r17] = r10,16;	/* save ar.fpsr */\
	mov r13 = r21;		/* establish `current' */		\
	;;								\
	.mem.offset 0,0; st8.spill [r16] = r15,16;			\
	.mem.offset 8,0; st8.spill [r17] = r14,16;			\
	;;								\
	.mem.offset 0,0; st8.spill [r16] = r2,16;			\
	.mem.offset 8,0; st8.spill [r17] = r3,16;			\
	adds r2 = VMM_PT_REGS_R16_OFFSET,r1;	/* exit: r2 -> &pt_regs.r16 */\
	;;								\
	adds r16 = VMM_VCPU_IIPA_OFFSET,r13;				\
	adds r17 = VMM_VCPU_ISR_OFFSET,r13;				\
	mov r26 = cr.iipa;						\
	mov r27 = cr.isr;						\
	;;								\
	st8 [r16] = r26;	/* save cr.iipa into the vcpu */	\
	st8 [r17] = r27;	/* save cr.isr into the vcpu */		\
	;;								\
	EXTRA;								\
	mov r8 = ar.ccv;						\
	mov r9 = ar.csd;						\
	mov r10 = ar.ssd;						\
	movl r11 = FPSR_DEFAULT;	/* L-unit */			\
	adds r17 = VMM_VCPU_GP_OFFSET,r13;				\
	;;								\
	ld8 r1 = [r17];		/* establish kernel global pointer */	\
	;;								\
	PAL_VSA_SYNC_READ						\
	KVM_MINSTATE_END_SAVE_MIN
/*
 * SAVE_REST saves the remainder of pt_regs (with psr.ic on).
 *
 * Assumed state upon entry:
 *	psr.ic: on
 *	r2:	points to &pt_regs.f6
 *	r3:	points to &pt_regs.f7
 *	r8:	contents of ar.ccv
 *	r9:	contents of ar.csd
 *	r10:	contents of ar.ssd
 *	r11:	FPSR_DEFAULT
 *
 * Registers r14 and r15 are guaranteed not to be touched by SAVE_REST.
 */
  191. #define KVM_SAVE_REST \
  192. .mem.offset 0,0; st8.spill [r2] = r16,16; \
  193. .mem.offset 8,0; st8.spill [r3] = r17,16; \
  194. ;; \
  195. .mem.offset 0,0; st8.spill [r2] = r18,16; \
  196. .mem.offset 8,0; st8.spill [r3] = r19,16; \
  197. ;; \
  198. .mem.offset 0,0; st8.spill [r2] = r20,16; \
  199. .mem.offset 8,0; st8.spill [r3] = r21,16; \
  200. mov r18=b6; \
  201. ;; \
  202. .mem.offset 0,0; st8.spill [r2] = r22,16; \
  203. .mem.offset 8,0; st8.spill [r3] = r23,16; \
  204. mov r19 = b7; \
  205. ;; \
  206. .mem.offset 0,0; st8.spill [r2] = r24,16; \
  207. .mem.offset 8,0; st8.spill [r3] = r25,16; \
  208. ;; \
  209. .mem.offset 0,0; st8.spill [r2] = r26,16; \
  210. .mem.offset 8,0; st8.spill [r3] = r27,16; \
  211. ;; \
  212. .mem.offset 0,0; st8.spill [r2] = r28,16; \
  213. .mem.offset 8,0; st8.spill [r3] = r29,16; \
  214. ;; \
  215. .mem.offset 0,0; st8.spill [r2] = r30,16; \
  216. .mem.offset 8,0; st8.spill [r3] = r31,32; \
  217. ;; \
  218. mov ar.fpsr = r11; \
  219. st8 [r2] = r8,8; \
  220. adds r24 = PT(B6)-PT(F7),r3; \
  221. adds r25 = PT(B7)-PT(F7),r3; \
  222. ;; \
  223. st8 [r24] = r18,16; /* b6 */ \
  224. st8 [r25] = r19,16; /* b7 */ \
  225. adds r2 = PT(R4)-PT(F6),r2; \
  226. adds r3 = PT(R5)-PT(F7),r3; \
  227. ;; \
  228. st8 [r24] = r9; /* ar.csd */ \
  229. st8 [r25] = r10; /* ar.ssd */ \
  230. ;; \
  231. mov r18 = ar.unat; \
  232. adds r19 = PT(EML_UNAT)-PT(R4),r2; \
  233. ;; \
  234. st8 [r19] = r18; /* eml_unat */ \
  235. #define KVM_SAVE_EXTRA \
  236. .mem.offset 0,0; st8.spill [r2] = r4,16; \
  237. .mem.offset 8,0; st8.spill [r3] = r5,16; \
  238. ;; \
  239. .mem.offset 0,0; st8.spill [r2] = r6,16; \
  240. .mem.offset 8,0; st8.spill [r3] = r7; \
  241. ;; \
  242. mov r26 = ar.unat; \
  243. ;; \
  244. st8 [r2] = r26;/* eml_unat */ \
/* SAVE_MIN with `cover': capture cr.ifs into r30; no EXTRA step. */
#define KVM_SAVE_MIN_WITH_COVER	KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs,)
/* As above, but additionally copy r19 into r15 for the handler. */
#define KVM_SAVE_MIN_WITH_COVER_R19	KVM_DO_SAVE_MIN(cover, mov r30 = cr.ifs, mov r15 = r19)
/* SAVE_MIN without `cover': r30 (the cr.ifs save slot) is zeroed via r0. */
#define KVM_SAVE_MIN	KVM_DO_SAVE_MIN( , mov r30 = r0, )