/*
 * File:	mca_asm.h
 * Purpose:	Machine check handling specific defines
 *
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) Vijay Chander <vijay@engr.sgi.com>
 * Copyright (C) Srinivasa Thirumalachar <sprasad@engr.sgi.com>
 * Copyright (C) 2000 Hewlett-Packard Co.
 * Copyright (C) 2000 David Mosberger-Tang <davidm@hpl.hp.com>
 * Copyright (C) 2002 Intel Corp.
 * Copyright (C) 2002 Jenna Hall <jenna.s.hall@intel.com>
 * Copyright (C) 2005 Silicon Graphics, Inc
 * Copyright (C) 2005 Keith Owens <kaos@sgi.com>
 */
#ifndef _ASM_IA64_MCA_ASM_H
#define _ASM_IA64_MCA_ASM_H

#include <asm/percpu.h>

#define PSR_IC		13
#define PSR_I		14
#define PSR_DT		17
#define PSR_RT		27
#define PSR_MC		35
#define PSR_IT		36
#define PSR_BN		44
/*
 * This macro converts an instruction virtual address to a physical address.
 * Right now, for simulation purposes, the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Lop off bits 61 thru 63 in the virtual address
 */
#define INST_VA_TO_PA(addr)	\
	dep	addr = 0, addr, 61, 3
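/*
 * Illustrative example (added, not part of the original header): under the
 * direct mapping described above, a region-7 instruction address such as
 * 0xe000000000100000 has bits 61-63 cleared by the dep, leaving the
 * physical address 0x0000000000100000.
 */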
/*
 * This macro converts a data virtual address to a physical address.
 * Right now, for simulation purposes, the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Lop off bits 61 thru 63 in the virtual address
 */
#define DATA_VA_TO_PA(addr)	\
	tpa	addr = addr
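/*
 * Added note (not from the original source): tpa asks the processor to
 * translate the data address through the current data TLB mapping; with the
 * identity mapping described above, 0xe000000000200000 would translate to
 * 0x0000000000200000.
 */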
/*
 * This macro converts a data physical address to a virtual address.
 * Right now, for simulation purposes, the virtual addresses are
 * direct mapped to physical addresses.
 *	1. Put 0x7 in bits 61 thru 63.
 */
#define DATA_PA_TO_VA(addr, temp)	\
	mov	temp = 0x7 ;;	\
	dep	addr = temp, addr, 61, 3
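/*
 * Added worked example: depositing 0x7 into bits 61-63 of the physical
 * address 0x0000000000200000 yields the region-7 virtual address
 * 0xe000000000200000, i.e. the inverse of DATA_VA_TO_PA under the direct
 * mapping.
 */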
#define GET_THIS_PADDR(reg, var)	\
	mov	reg = IA64_KR(PER_CPU_DATA);;	\
	addl	reg = THIS_CPU(var), reg
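/*
 * Added note (not in the original header): GET_THIS_PADDR loads the physical
 * base of this CPU's per-CPU area from the PER_CPU_DATA kernel register and
 * adds the per-CPU offset of "var", so "reg" ends up holding the physical
 * address of this CPU's copy of "var".  A hypothetical use, with r2 as the
 * scratch register and some_percpu_var standing in for a real per-CPU symbol:
 *
 *	GET_THIS_PADDR(r2, some_percpu_var)
 *	;;
 *	ld8	r3 = [r2]
 */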
/*
 * This macro jumps to the instruction at the given virtual address
 * and starts execution in physical mode with all the address
 * translations turned off.
 *	1. Save the current psr.
 *	2. Make sure that all the upper 32 bits are off.
 *
 *	3. Clear the interrupt enable and interrupt state collection bits
 *	   in the psr before updating the ipsr and iip.
 *
 *	4. Turn off the instruction, data and rse translation bits of the psr
 *	   and store the new value into ipsr.
 *	   Also make sure that the interrupts are disabled.
 *	   Ensure that we are in little endian mode.
 *	   [psr.{rt, it, dt, i, be} = 0]
 *
 *	5. Get the physical address corresponding to the virtual address
 *	   of the next instruction bundle and put it in iip.
 *	   (Using magic numbers 24 and 40 in the deposit instruction since
 *	   the IA64_SDK code directly maps the lower 24 bits of a virtual
 *	   address to a physical address.)
 *
 *	6. Do an rfi to move the values from ipsr to psr and iip to ip.
 */
#define PHYSICAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
	mov	old_psr = psr; \
	;; \
	dep	old_psr = 0, old_psr, 32, 32; \
	\
	mov	ar.rsc = 0; \
	;; \
	srlz.d; \
	mov	temp2 = ar.bspstore; \
	;; \
	DATA_VA_TO_PA(temp2); \
	;; \
	mov	temp1 = ar.rnat; \
	;; \
	mov	ar.bspstore = temp2; \
	;; \
	mov	ar.rnat = temp1; \
	mov	temp1 = psr; \
	mov	temp2 = psr; \
	;; \
	\
	dep	temp2 = 0, temp2, PSR_IC, 2; \
	;; \
	mov	psr.l = temp2; \
	;; \
	srlz.d; \
	dep	temp1 = 0, temp1, 32, 32; \
	;; \
	dep	temp1 = 0, temp1, PSR_IT, 1; \
	;; \
	dep	temp1 = 0, temp1, PSR_DT, 1; \
	;; \
	dep	temp1 = 0, temp1, PSR_RT, 1; \
	;; \
	dep	temp1 = 0, temp1, PSR_I, 1; \
	;; \
	dep	temp1 = 0, temp1, PSR_IC, 1; \
	;; \
	dep	temp1 = -1, temp1, PSR_MC, 1; \
	;; \
	mov	cr.ipsr = temp1; \
	;; \
	LOAD_PHYSICAL(p0, temp2, start_addr); \
	;; \
	mov	cr.iip = temp2; \
	mov	cr.ifs = r0; \
	DATA_VA_TO_PA(sp); \
	DATA_VA_TO_PA(gp); \
	;; \
	srlz.i; \
	;; \
	nop	1; \
	nop	2; \
	nop	1; \
	nop	2; \
	rfi; \
	;;
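/*
 * Added usage sketch (register and label names are purely illustrative, not
 * taken from the original source): the caller supplies two scratch registers,
 * the virtual address of the target bundle, and a register that receives the
 * saved psr, e.g.
 *
 *	PHYSICAL_MODE_ENTER(r2, r3, phys_label, r20)
 *
 * After the rfi, execution continues at phys_label's physical address with
 * psr.it/dt/rt and interrupts off, and r20 holds the lower 32 bits of the
 * old psr.
 */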
/*
 * This macro jumps to the instruction at the given virtual address
 * and starts execution in virtual mode with all the address
 * translations turned on.
 *	1. Get the old saved psr.
 *
 *	2. Clear the interrupt state collection bit in the current psr.
 *
 *	3. Set the instruction translation bit back in the old psr.
 *	   Note that we have to do this since we are right now saving only the
 *	   lower 32 bits of the old psr.  (Also, the old psr has the data and
 *	   rse translation bits on.)
 *
 *	4. Set ipsr to this old_psr with the "it" bit set and "bn" = 1.
 *
 *	5. Reset the current thread pointer (r13).
 *
 *	6. Set iip to the virtual address of the next instruction bundle.
 *
 *	7. Do an rfi to move ipsr to psr and iip to ip.
 */
#define VIRTUAL_MODE_ENTER(temp1, temp2, start_addr, old_psr) \
	mov	temp2 = psr; \
	;; \
	mov	old_psr = temp2; \
	;; \
	dep	temp2 = 0, temp2, PSR_IC, 2; \
	;; \
	mov	psr.l = temp2; \
	mov	ar.rsc = 0; \
	;; \
	srlz.d; \
	mov	r13 = ar.k6; \
	mov	temp2 = ar.bspstore; \
	;; \
	DATA_PA_TO_VA(temp2, temp1); \
	;; \
	mov	temp1 = ar.rnat; \
	;; \
	mov	ar.bspstore = temp2; \
	;; \
	mov	ar.rnat = temp1; \
	;; \
	mov	temp1 = old_psr; \
	;; \
	mov	temp2 = 1; \
	;; \
	dep	temp1 = temp2, temp1, PSR_IC, 1; \
	;; \
	dep	temp1 = temp2, temp1, PSR_IT, 1; \
	;; \
	dep	temp1 = temp2, temp1, PSR_DT, 1; \
	;; \
	dep	temp1 = temp2, temp1, PSR_RT, 1; \
	;; \
	dep	temp1 = temp2, temp1, PSR_BN, 1; \
	;; \
	\
	mov	cr.ipsr = temp1; \
	movl	temp2 = start_addr; \
	;; \
	mov	cr.iip = temp2; \
	movl	gp = __gp \
	;; \
	DATA_PA_TO_VA(sp, temp1); \
	srlz.i; \
	;; \
	nop	1; \
	nop	2; \
	nop	1; \
	rfi \
	;;
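/*
 * Added usage sketch (register and label names are illustrative only): the
 * two macros are typically used as a pair, dropping to physical mode for
 * some work and then re-entering virtual mode, e.g.
 *
 *	PHYSICAL_MODE_ENTER(r2, r3, phys_label, r20)
 * phys_label:
 *	// ... work done with translations off ...
 *	VIRTUAL_MODE_ENTER(r2, r3, virt_label, r20)
 * virt_label:
 *	// ... back in virtual mode ...
 */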
/*
 * The MCA and INIT stacks in struct ia64_mca_cpu look like normal kernel
 * stacks, except that the SAL/OS state and a switch_stack are stored near the
 * top of the MCA/INIT stack.  To support concurrent entry to MCA or INIT, as
 * well as MCA over INIT, each event needs its own SAL/OS state.  All entries
 * are 16 byte aligned.
 *
 *	+---------------------------+
 *	|          pt_regs          |
 *	+---------------------------+
 *	|        switch_stack       |
 *	+---------------------------+
 *	|        SAL/OS state       |
 *	+---------------------------+
 *	|   16 byte scratch area    |
 *	+---------------------------+ <-------- SP at start of C MCA handler
 *	|           .....           |
 *	+---------------------------+
 *	| RBS for MCA/INIT handler  |
 *	+---------------------------+
 *	| struct task for MCA/INIT  |
 *	+---------------------------+ <-------- Bottom of MCA/INIT stack
 */
#define ALIGN16(x)			((x)&~15)
#define MCA_PT_REGS_OFFSET		ALIGN16(KERNEL_STACK_SIZE-IA64_PT_REGS_SIZE)
#define MCA_SWITCH_STACK_OFFSET		ALIGN16(MCA_PT_REGS_OFFSET-IA64_SWITCH_STACK_SIZE)
#define MCA_SOS_OFFSET			ALIGN16(MCA_SWITCH_STACK_OFFSET-IA64_SAL_OS_STATE_SIZE)
#define MCA_SP_OFFSET			ALIGN16(MCA_SOS_OFFSET-16)
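/*
 * Added worked example (the sizes below are made-up round numbers, not the
 * real values of these constants): ALIGN16 simply clears the low four bits,
 * so ALIGN16(0x1ff8) == 0x1ff0.  With a hypothetical KERNEL_STACK_SIZE of
 * 0x8000 and IA64_PT_REGS_SIZE of 0x2f0, MCA_PT_REGS_OFFSET would be
 * ALIGN16(0x8000 - 0x2f0) = ALIGN16(0x7d10) = 0x7d10, and each later offset
 * steps further down the stack in the same way, matching the layout diagram
 * above.
 */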
#endif /* _ASM_IA64_MCA_ASM_H */