
/******************************************************************************
 * arch/ia64/include/asm/xen/inst.h
 *
 * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp>
 *                    VA Linux Systems Japan K.K.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 */

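/*
 * Xen paravirtualized replacements for the privileged-instruction macros
 * used by the ia64 entry/ivt assembly (the native counterparts live in
 * asm/native/inst.h).  Most control-register accesses here become plain
 * loads/stores on the mapped shared-info area (the XSI_* addresses); the
 * rest are routed through explicit hypercalls.
 */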
#include <asm/xen/privop.h>

#define ia64_ivt xen_ivt
#define DO_SAVE_MIN XEN_DO_SAVE_MIN

#define __paravirt_switch_to xen_switch_to
#define __paravirt_leave_syscall xen_leave_syscall
#define __paravirt_work_processed_syscall xen_work_processed_syscall
#define __paravirt_leave_kernel xen_leave_kernel
#define __paravirt_pending_syscall_end xen_work_pending_syscall_end
#define __paravirt_work_processed_syscall_target \
	xen_work_processed_syscall

#define paravirt_fsyscall_table xen_fsyscall_table
#define paravirt_fsys_bubble_down xen_fsys_bubble_down

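/*
 * The MOV_FROM_* macros below replace privileged "mov reg = cr.*" reads
 * with ordinary loads from the Xen shared-info area (the XSI_* addresses),
 * so the frequently read control registers can be fetched without a
 * hypercall.
 */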
#define MOV_FROM_IFA(reg) \
	movl reg = XSI_IFA; \
	;; \
	ld8 reg = [reg]

#define MOV_FROM_ITIR(reg) \
	movl reg = XSI_ITIR; \
	;; \
	ld8 reg = [reg]

#define MOV_FROM_ISR(reg) \
	movl reg = XSI_ISR; \
	;; \
	ld8 reg = [reg]

#define MOV_FROM_IHA(reg) \
	movl reg = XSI_IHA; \
	;; \
	ld8 reg = [reg]

#define MOV_FROM_IPSR(pred, reg) \
(pred)	movl reg = XSI_IPSR; \
	;; \
(pred)	ld8 reg = [reg]

#define MOV_FROM_IIM(reg) \
	movl reg = XSI_IIM; \
	;; \
	ld8 reg = [reg]

#define MOV_FROM_IIP(reg) \
	movl reg = XSI_IIP; \
	;; \
	ld8 reg = [reg]

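/*
 * Reads of cr.ivr and psr go through hypercalls (XEN_HYPER_GET_IVR /
 * XEN_HYPER_GET_PSR) rather than the shared area; the result comes back
 * in r8.  The .ifc special cases in the two macros below only avoid a
 * redundant save/restore of r8 when r8 is already the destination or the
 * designated clobber register.
 */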
.macro __MOV_FROM_IVR reg, clob
	.ifc "\reg", "r8"
	XEN_HYPER_GET_IVR
	.exitm
	.endif
	.ifc "\clob", "r8"
	XEN_HYPER_GET_IVR
	;;
	mov \reg = r8
	.exitm
	.endif
	mov \clob = r8
	;;
	XEN_HYPER_GET_IVR
	;;
	mov \reg = r8
	;;
	mov r8 = \clob
.endm
#define MOV_FROM_IVR(reg, clob) __MOV_FROM_IVR reg, clob

.macro __MOV_FROM_PSR pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_GET_PSR;
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	XEN_HYPER_GET_PSR
	;;
	(\pred)	mov \reg = r8
	.exitm
	.endif
	(\pred)	mov \clob = r8
	(\pred)	XEN_HYPER_GET_PSR
	;;
	(\pred)	mov \reg = r8
	(\pred)	mov r8 = \clob
.endm
#define MOV_FROM_PSR(pred, reg, clob) __MOV_FROM_PSR pred, reg, clob

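/*
 * Reading ar.itc under Xen: the raw counter is adjusted by the
 * hypervisor-provided XSI_ITC_OFFSET, and XSI_ITC_LAST keeps the result
 * monotonic (if the adjusted value would go backwards, the last value + 1
 * is used instead) before being stored back as the new XSI_ITC_LAST.
 */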
/* assuming ar.itc is read with interrupts disabled. */
#define MOV_FROM_ITC(pred, pred_clob, reg, clob) \
(pred)	movl clob = XSI_ITC_OFFSET; \
	;; \
(pred)	ld8 clob = [clob]; \
(pred)	mov reg = ar.itc; \
	;; \
(pred)	add reg = reg, clob; \
	;; \
(pred)	movl clob = XSI_ITC_LAST; \
	;; \
(pred)	ld8 clob = [clob]; \
	;; \
(pred)	cmp.geu.unc pred_clob, p0 = clob, reg; \
	;; \
(pred_clob)	add reg = 1, clob; \
	;; \
(pred)	movl clob = XSI_ITC_LAST; \
	;; \
(pred)	st8 [clob] = reg

#define MOV_TO_IFA(reg, clob) \
	movl clob = XSI_IFA; \
	;; \
	st8 [clob] = reg

#define MOV_TO_ITIR(pred, reg, clob) \
(pred)	movl clob = XSI_ITIR; \
	;; \
(pred)	st8 [clob] = reg

#define MOV_TO_IHA(pred, reg, clob) \
(pred)	movl clob = XSI_IHA; \
	;; \
(pred)	st8 [clob] = reg

#define MOV_TO_IPSR(pred, reg, clob) \
(pred)	movl clob = XSI_IPSR; \
	;; \
(pred)	st8 [clob] = reg; \
	;;

#define MOV_TO_IFS(pred, reg, clob) \
(pred)	movl clob = XSI_IFS; \
	;; \
(pred)	st8 [clob] = reg; \
	;;

#define MOV_TO_IIP(reg, clob) \
	movl clob = XSI_IIP; \
	;; \
	st8 [clob] = reg

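/*
 * Writing a kernel register (ar.k*) goes through XEN_HYPER_SET_KR, with
 * the register index passed in r8 and the new value in r9.
 * ____MOV_TO_KR marshals its arguments into r8/r9 while preserving the
 * caller's r8/r9 in the clobber registers; __MOV_TO_KR merely reorders
 * the clobbers so that neither one aliases the register it is supposed
 * to preserve.
 */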
.macro ____MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
	.error "clob0 \clob0 must not be r9"
	.endif
	.ifc "\clob1", "r8"
	.error "clob1 \clob1 must not be r8"
	.endif
	.ifnc "\reg", "r9"
	.ifnc "\clob1", "r9"
	mov \clob1 = r9
	.endif
	mov r9 = \reg
	.endif
	.ifnc "\clob0", "r8"
	mov \clob0 = r8
	.endif
	mov r8 = \kr
	;;
	XEN_HYPER_SET_KR
	.ifnc "\reg", "r9"
	.ifnc "\clob1", "r9"
	mov r9 = \clob1
	.endif
	.endif
	.ifnc "\clob0", "r8"
	mov r8 = \clob0
	.endif
.endm

.macro __MOV_TO_KR kr, reg, clob0, clob1
	.ifc "\clob0", "r9"
	____MOV_TO_KR \kr, \reg, \clob1, \clob0
	.exitm
	.endif
	.ifc "\clob1", "r8"
	____MOV_TO_KR \kr, \reg, \clob1, \clob0
	.exitm
	.endif
	____MOV_TO_KR \kr, \reg, \clob0, \clob1
.endm

#define MOV_TO_KR(kr, reg, clob0, clob1) \
	__MOV_TO_KR IA64_KR_ ## kr, reg, clob0, clob1

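/*
 * TLB inserts (itc.i/itc.d) are privileged, so ITC_I, ITC_D and
 * ITC_I_AND_D are implemented as hypercalls that take their operand in
 * r8; as above, the .ifc branches only skip the r8 save/restore when it
 * is unnecessary.
 */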
.macro __ITC_I pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_ITC_I
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	.exitm
	.endif
	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_I
	;;
	(\pred)	mov r8 = \clob
	;;
.endm
#define ITC_I(pred, reg, clob) __ITC_I pred, reg, clob

.macro __ITC_D pred, reg, clob
	.ifc "\reg", "r8"
	(\pred)	XEN_HYPER_ITC_D
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	.exitm
	.endif
	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg
	;;
	(\pred)	XEN_HYPER_ITC_D
	;;
	(\pred)	mov r8 = \clob
	;;
.endm
#define ITC_D(pred, reg, clob) __ITC_D pred, reg, clob

.macro __ITC_I_AND_D pred_i, pred_d, reg, clob
	.ifc "\reg", "r8"
	(\pred_i) XEN_HYPER_ITC_I
	;;
	(\pred_d) XEN_HYPER_ITC_D
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	mov r8 = \reg
	;;
	(\pred_i) XEN_HYPER_ITC_I
	;;
	(\pred_d) XEN_HYPER_ITC_D
	;;
	.exitm
	.endif
	mov \clob = r8
	mov r8 = \reg
	;;
	(\pred_i) XEN_HYPER_ITC_I
	;;
	(\pred_d) XEN_HYPER_ITC_D
	;;
	mov r8 = \clob
	;;
.endm
#define ITC_I_AND_D(pred_i, pred_d, reg, clob) \
	__ITC_I_AND_D pred_i, pred_d, reg, clob

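/*
 * THASH (VHPT hash of an address) is likewise a hypercall: the input
 * goes in r8 and the hashed address comes back in r8.
 */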
.macro __THASH pred, reg0, reg1, clob
	.ifc "\reg0", "r8"
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	.exitm
	.endif
	.ifc "\reg1", "r8"
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	;;
	.exitm
	.endif
	.ifc "\clob", "r8"
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	;;
	.exitm
	.endif
	(\pred)	mov \clob = r8
	(\pred)	mov r8 = \reg1
	(\pred)	XEN_HYPER_THASH
	;;
	(\pred)	mov \reg0 = r8
	(\pred)	mov r8 = \clob
	;;
.endm
#define THASH(pred, reg0, reg1, clob) __THASH pred, reg0, reg1, clob

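/*
 * psr.ic and psr.i are virtualized as memory flags: XSI_PSR_IC holds the
 * virtual psr.ic, and the byte at *XSI_PSR_I_ADDR is the event-channel
 * upcall mask (the inverse of psr.i), with the upcall-pending byte at
 * MASK_TO_PEND_OFS from it.  ssm/rsm of these bits therefore turn into
 * stores, and SSM_PSR_I only issues a real XEN_HYPER_SSM_I when an
 * upcall is already pending.
 */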
#define SSM_PSR_IC_AND_DEFAULT_BITS_AND_SRLZ_I(clob0, clob1) \
	mov clob0 = 1; \
	movl clob1 = XSI_PSR_IC; \
	;; \
	st4 [clob1] = clob0 \
	;;

#define SSM_PSR_IC_AND_SRLZ_D(clob0, clob1) \
	;; \
	srlz.d; \
	mov clob1 = 1; \
	movl clob0 = XSI_PSR_IC; \
	;; \
	st4 [clob0] = clob1

#define RSM_PSR_IC(clob) \
	movl clob = XSI_PSR_IC; \
	;; \
	st4 [clob] = r0; \
	;;

/* pred will be clobbered */
#define MASK_TO_PEND_OFS (-1)
#define SSM_PSR_I(pred, pred_clob, clob) \
(pred)	movl clob = XSI_PSR_I_ADDR \
	;; \
(pred)	ld8 clob = [clob] \
	;; \
	/* if (pred) vpsr.i = 1 */ \
	/* if (pred) (vcpu->vcpu_info->evtchn_upcall_mask) = 0 */ \
(pred)	st1 [clob] = r0, MASK_TO_PEND_OFS \
	;; \
	/* if (vcpu->vcpu_info->evtchn_upcall_pending) */ \
(pred)	ld1 clob = [clob] \
	;; \
(pred)	cmp.ne.unc pred_clob, p0 = clob, r0 \
	;; \
(pred_clob)	XEN_HYPER_SSM_I	/* do a real ssm psr.i */

#define RSM_PSR_I(pred, clob0, clob1) \
	movl clob0 = XSI_PSR_I_ADDR; \
	mov clob1 = 1; \
	;; \
	ld8 clob0 = [clob0]; \
	;; \
(pred)	st1 [clob0] = clob1

#define RSM_PSR_I_IC(clob0, clob1, clob2) \
	movl clob0 = XSI_PSR_I_ADDR; \
	movl clob1 = XSI_PSR_IC; \
	;; \
	ld8 clob0 = [clob0]; \
	mov clob2 = 1; \
	;; \
	/* note: clears both vpsr.i and vpsr.ic! */ \
	st1 [clob0] = clob2; \
	st4 [clob1] = r0; \
	;;

#define RSM_PSR_DT \
	XEN_HYPER_RSM_PSR_DT

#define RSM_PSR_BE_I(clob0, clob1) \
	RSM_PSR_I(p0, clob0, clob1); \
	rum psr.be

#define SSM_PSR_DT_AND_SRLZ_I \
	XEN_HYPER_SSM_PSR_DT

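/*
 * Register-bank switches (bsw.0/bsw.1) are emulated: BSW_0 spills the
 * bank-1 registers r16-r31 into the XSI_BANK1_R16 save area (stashing
 * their NaT bits in XSI_B1NAT) and writes 0 to XSI_BANKNUM, while
 * XEN_BSW_1 reverses the process.
 */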
#define BSW_0(clob0, clob1, clob2) \
	;; \
	/* r16-r31 all now hold bank1 values */ \
	mov clob2 = ar.unat; \
	movl clob0 = XSI_BANK1_R16; \
	movl clob1 = XSI_BANK1_R16 + 8; \
	;; \
	.mem.offset 0, 0; st8.spill [clob0] = r16, 16; \
	.mem.offset 8, 0; st8.spill [clob1] = r17, 16; \
	;; \
	.mem.offset 0, 0; st8.spill [clob0] = r18, 16; \
	.mem.offset 8, 0; st8.spill [clob1] = r19, 16; \
	;; \
	.mem.offset 0, 0; st8.spill [clob0] = r20, 16; \
	.mem.offset 8, 0; st8.spill [clob1] = r21, 16; \
	;; \
	.mem.offset 0, 0; st8.spill [clob0] = r22, 16; \
	.mem.offset 8, 0; st8.spill [clob1] = r23, 16; \
	;; \
	.mem.offset 0, 0; st8.spill [clob0] = r24, 16; \
	.mem.offset 8, 0; st8.spill [clob1] = r25, 16; \
	;; \
	.mem.offset 0, 0; st8.spill [clob0] = r26, 16; \
	.mem.offset 8, 0; st8.spill [clob1] = r27, 16; \
	;; \
	.mem.offset 0, 0; st8.spill [clob0] = r28, 16; \
	.mem.offset 8, 0; st8.spill [clob1] = r29, 16; \
	;; \
	.mem.offset 0, 0; st8.spill [clob0] = r30, 16; \
	.mem.offset 8, 0; st8.spill [clob1] = r31, 16; \
	;; \
	mov clob1 = ar.unat; \
	movl clob0 = XSI_B1NAT; \
	;; \
	st8 [clob0] = clob1; \
	mov ar.unat = clob2; \
	movl clob0 = XSI_BANKNUM; \
	;; \
	st4 [clob0] = r0

/* FIXME: THIS CODE IS NOT NaT SAFE! */
#define XEN_BSW_1(clob) \
	mov clob = ar.unat; \
	movl r30 = XSI_B1NAT; \
	;; \
	ld8 r30 = [r30]; \
	mov r31 = 1; \
	;; \
	mov ar.unat = r30; \
	movl r30 = XSI_BANKNUM; \
	;; \
	st4 [r30] = r31; \
	movl r30 = XSI_BANK1_R16; \
	movl r31 = XSI_BANK1_R16 + 8; \
	;; \
	ld8.fill r16 = [r30], 16; \
	ld8.fill r17 = [r31], 16; \
	;; \
	ld8.fill r18 = [r30], 16; \
	ld8.fill r19 = [r31], 16; \
	;; \
	ld8.fill r20 = [r30], 16; \
	ld8.fill r21 = [r31], 16; \
	;; \
	ld8.fill r22 = [r30], 16; \
	ld8.fill r23 = [r31], 16; \
	;; \
	ld8.fill r24 = [r30], 16; \
	ld8.fill r25 = [r31], 16; \
	;; \
	ld8.fill r26 = [r30], 16; \
	ld8.fill r27 = [r31], 16; \
	;; \
	ld8.fill r28 = [r30], 16; \
	ld8.fill r29 = [r31], 16; \
	;; \
	ld8.fill r30 = [r30]; \
	ld8.fill r31 = [r31]; \
	;; \
	mov ar.unat = clob

#define BSW_1(clob0, clob1) XEN_BSW_1(clob1)

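/*
 * cover and rfi are also replaced by hypercalls outright; the
 * dv_serialize_data after XEN_HYPER_RFI tells the assembler that data
 * dependencies are serialized at that point for whatever follows.
 */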
#define COVER \
	XEN_HYPER_COVER

#define RFI \
	XEN_HYPER_RFI; \
	dv_serialize_data