/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994, 95, 96, 99, 2001 Ralf Baechle
 * Copyright (C) 1994, 1995, 1996 Paul M. Antoine.
 * Copyright (C) 1999 Silicon Graphics, Inc.
 * Copyright (C) 2007 Maciej W. Rozycki
 */
#ifndef _ASM_STACKFRAME_H
#define _ASM_STACKFRAME_H

#include <linux/threads.h>

#include <asm/asm.h>
#include <asm/asmmacro.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>

/* Make the addition of cfi info a little easier. */
	.macro	cfi_rel_offset reg offset=0 docfi=0
	.if	\docfi
	.cfi_rel_offset	\reg, \offset
	.endif
	.endm

	.macro	cfi_st reg offset=0 docfi=0
	LONG_S	\reg, \offset(sp)
	cfi_rel_offset \reg, \offset, \docfi
	.endm

	.macro	cfi_restore reg offset=0 docfi=0
	.if	\docfi
	.cfi_restore \reg
	.endif
	.endm

	.macro	cfi_ld reg offset=0 docfi=0
	LONG_L	\reg, \offset(sp)
	cfi_restore \reg \offset \docfi
	.endm
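
/*
 * For example (a sketch; PT_R4 is the pt_regs slot used below),
 * "cfi_st $4, PT_R4, 1" expands to
 *
 *	LONG_S	$4, PT_R4(sp)
 *	.cfi_rel_offset	$4, PT_R4
 *
 * telling the unwinder where $4 was spilled relative to the frame.
 */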

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
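
/*
 * STATMASK covers the mode/interrupt bits of c0_status that the macros
 * below manipulate: on R3000-class CPUs the six-bit
 * KUo/IEo/KUp/IEp/KUc/IEc stack (0x3f), on later CPUs KSU, ERL, EXL
 * and IE (0x1f).
 */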

	.macro	SAVE_AT docfi=0
	.set	push
	.set	noat
	cfi_st	$1, PT_R1, \docfi
	.set	pop
	.endm
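
/*
 * Note that in SAVE_TEMP below the mfhi/mflo results are stored
 * several instructions after they are read, interleaved with the GPR
 * saves, which hides the HI/LO access latency of older cores.
 */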

	.macro	SAVE_TEMP docfi=0
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	mflhxu	v1
	LONG_S	v1, PT_LO(sp)
	mflhxu	v1
	LONG_S	v1, PT_HI(sp)
	mflhxu	v1
	LONG_S	v1, PT_ACX(sp)
#elif !defined(CONFIG_CPU_MIPSR6)
	mfhi	v1
#endif
#ifdef CONFIG_32BIT
	cfi_st	$8, PT_R8, \docfi
	cfi_st	$9, PT_R9, \docfi
#endif
	cfi_st	$10, PT_R10, \docfi
	cfi_st	$11, PT_R11, \docfi
	cfi_st	$12, PT_R12, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
	LONG_S	v1, PT_HI(sp)
	mflo	v1
#endif
	cfi_st	$13, PT_R13, \docfi
	cfi_st	$14, PT_R14, \docfi
	cfi_st	$15, PT_R15, \docfi
	cfi_st	$24, PT_R24, \docfi
#if !defined(CONFIG_CPU_HAS_SMARTMIPS) && !defined(CONFIG_CPU_MIPSR6)
	LONG_S	v1, PT_LO(sp)
#endif
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/*
	 * The Octeon multiplier state is affected by general
	 * multiply instructions. It must be saved before
	 * kernel code might corrupt it.
	 */
	jal	octeon_mult_save
#endif
	.endm

	.macro	SAVE_STATIC docfi=0
	cfi_st	$16, PT_R16, \docfi
	cfi_st	$17, PT_R17, \docfi
	cfi_st	$18, PT_R18, \docfi
	cfi_st	$19, PT_R19, \docfi
	cfi_st	$20, PT_R20, \docfi
	cfi_st	$21, PT_R21, \docfi
	cfi_st	$22, PT_R22, \docfi
	cfi_st	$23, PT_R23, \docfi
	cfi_st	$30, PT_R30, \docfi
	.endm
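
/*
 * $16-$23 and $30 are the callee-saved s0-s7 and s8/fp registers of
 * the MIPS ABIs, hence "static": C code called from a handler will
 * preserve them anyway, so they are only saved when a handler really
 * needs the full register set.
 */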

/*
 * get_saved_sp returns the SP for the current CPU by looking in the
 * kernelsp array for it. If tosp is set, it stores the current sp in
 * k0 and loads the new value in sp. If not, it clobbers k0 and
 * stores the new value in k1, leaving sp unaffected.
 */
#ifdef CONFIG_SMP

/* SMP variation */
	.macro	get_saved_sp docfi=0 tosp=0
	ASM_CPUID_MFC0	k0, ASM_SMP_CPUID_REG
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, 16
#endif
	LONG_SRL	k0, SMP_CPUID_PTRSHIFT
	LONG_ADDU	k1, k0
	.if	\tosp
	move	k0, sp
	.if	\docfi
	.cfi_register sp, k0
	.endif
	LONG_L	sp, %lo(kernelsp)(k1)
	.else
	LONG_L	k1, %lo(kernelsp)(k1)
	.endif
	.endm
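
/*
 * Rough C equivalent of the SMP lookup above (a sketch; the shift
 * folds the CPU id encoding and the pointer scaling together, so the
 * result is a byte offset into the kernelsp[] pointer array):
 *
 *	offset = cpuid_reg >> SMP_CPUID_PTRSHIFT;
 *	new_sp = *(unsigned long *)((char *)kernelsp + offset);
 */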

	.macro	set_saved_sp stackp temp temp2
	ASM_CPUID_MFC0	\temp, ASM_SMP_CPUID_REG
	LONG_SRL	\temp, SMP_CPUID_PTRSHIFT
	LONG_S	\stackp, kernelsp(\temp)
	.endm
#else /* !CONFIG_SMP */

/* Uniprocessor variation */
	.macro	get_saved_sp docfi=0 tosp=0
#ifdef CONFIG_CPU_JUMP_WORKAROUNDS
	/*
	 * Clear BTB (branch target buffer) and forbid the RAS (return
	 * address stack) to work around the out-of-order issue in
	 * Loongson-2F via its diagnostic register.
	 */
	move	k0, ra
	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	jal	1f
	 nop
1:	move	ra, k0
	li	k0, 3
	mtc0	k0, $22
#endif /* CONFIG_CPU_JUMP_WORKAROUNDS */
#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
	lui	k1, %hi(kernelsp)
#else
	lui	k1, %highest(kernelsp)
	daddiu	k1, %higher(kernelsp)
	dsll	k1, k1, 16
	daddiu	k1, %hi(kernelsp)
	dsll	k1, k1, 16
#endif
	.if	\tosp
	move	k0, sp
	.if	\docfi
	.cfi_register sp, k0
	.endif
	LONG_L	sp, %lo(kernelsp)(k1)
	.else
	LONG_L	k1, %lo(kernelsp)(k1)
	.endif
	.endm

	.macro	set_saved_sp stackp temp temp2
	LONG_S	\stackp, kernelsp
	.endm
#endif
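
/*
 * set_saved_sp is expected to be invoked by the context-switch code,
 * so that the next exception taken from user mode on this CPU finds
 * the incoming task's kernel stack.
 */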

	.macro	SAVE_SOME docfi=0
	.set	push
	.set	noat
	.set	reorder
	mfc0	k0, CP0_STATUS
	sll	k0, 3		/* extract cu0 bit */
	.set	noreorder
	bltz	k0, 8f
	 move	k0, sp
	.if	\docfi
	.cfi_register sp, k0
	.endif
#ifdef CONFIG_EVA
	/*
	 * Flush interAptiv's Return Prediction Stack (RPS) by writing
	 * EntryHi. Toggling Config7.RPS is slower and less portable.
	 *
	 * The RPS isn't automatically flushed when exceptions are
	 * taken, which can result in kernel mode speculative accesses
	 * to user addresses if the RPS mispredicts. That's harmless
	 * when user and kernel share the same address space, but with
	 * EVA the same user segments may be unmapped to kernel mode,
	 * even containing sensitive MMIO regions or invalid memory.
	 *
	 * This can happen when the kernel sets the return address to
	 * ret_from_* and jr's to the exception handler, which looks
	 * more like a tail call than a function call. If nested calls
	 * don't evict the last user address in the RPS, it will
	 * mispredict the return and fetch from a user controlled
	 * address into the icache.
	 *
	 * More recent EVA-capable cores with MAAR to restrict
	 * speculative accesses aren't affected.
	 */
	MFC0	k0, CP0_ENTRYHI
	MTC0	k0, CP0_ENTRYHI
#endif
	.set	reorder
	/* Called from user mode, new stack. */
	get_saved_sp docfi=\docfi tosp=1
8:
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	at=k1
#endif
	PTR_SUBU sp, PT_SIZE
#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
	.set	noat
#endif
	.if	\docfi
	.cfi_def_cfa sp, 0
	.endif
	cfi_st	k0, PT_R29, \docfi
	cfi_rel_offset	sp, PT_R29, \docfi
	cfi_st	v1, PT_R3, \docfi
	/*
	 * You might think that you don't need to save $0,
	 * but the FPU emulator and gdb remote debug stub
	 * need it to operate correctly
	 */
	LONG_S	$0, PT_R0(sp)
	mfc0	v1, CP0_STATUS
	cfi_st	v0, PT_R2, \docfi
	LONG_S	v1, PT_STATUS(sp)
	cfi_st	$4, PT_R4, \docfi
	mfc0	v1, CP0_CAUSE
	cfi_st	$5, PT_R5, \docfi
	LONG_S	v1, PT_CAUSE(sp)
	cfi_st	$6, PT_R6, \docfi
	cfi_st	ra, PT_R31, \docfi
	MFC0	ra, CP0_EPC
	cfi_st	$7, PT_R7, \docfi
#ifdef CONFIG_64BIT
	cfi_st	$8, PT_R8, \docfi
	cfi_st	$9, PT_R9, \docfi
#endif
	LONG_S	ra, PT_EPC(sp)
	.if	\docfi
	.cfi_rel_offset ra, PT_EPC
	.endif
	cfi_st	$25, PT_R25, \docfi
	cfi_st	$28, PT_R28, \docfi
	/* Set thread_info if we're coming from user mode */
	mfc0	k0, CP0_STATUS
	sll	k0, 3		/* extract cu0 bit */
	bltz	k0, 9f
	ori	$28, sp, _THREAD_MASK
	xori	$28, _THREAD_MASK
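	/*
	 * The ori/xori pair above rounds sp down to the THREAD_SIZE-
	 * aligned stack base, where struct thread_info lives
	 * (_THREAD_MASK is THREAD_SIZE - 1).
	 */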
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	.set	mips64
	pref	0, 0($28)	/* Prefetch the current pointer */
#endif
9:
	.set	pop
	.endm

	.macro	SAVE_ALL docfi=0
	SAVE_SOME \docfi
	SAVE_AT \docfi
	SAVE_TEMP \docfi
	SAVE_STATIC \docfi
	.endm
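
/*
 * Typical use, sketched from the shape of the MIPS exception
 * handlers (handle_xyz/do_xyz are hypothetical names):
 *
 *	NESTED(handle_xyz, PT_SIZE, sp)
 *	SAVE_ALL
 *	CLI				# kernel mode, interrupts off
 *	move	a0, sp			# struct pt_regs * for C code
 *	jal	do_xyz
 *	j	ret_from_exception
 *	END(handle_xyz)
 */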

	.macro	RESTORE_AT docfi=0
	.set	push
	.set	noat
	cfi_ld	$1, PT_R1, \docfi
	.set	pop
	.endm

	.macro	RESTORE_TEMP docfi=0
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	/* Restore the Octeon multiplier state */
	jal	octeon_mult_restore
#endif
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	LONG_L	$24, PT_ACX(sp)
	mtlhx	$24
	LONG_L	$24, PT_HI(sp)
	mtlhx	$24
	LONG_L	$24, PT_LO(sp)
	mtlhx	$24
#elif !defined(CONFIG_CPU_MIPSR6)
	LONG_L	$24, PT_LO(sp)
	mtlo	$24
	LONG_L	$24, PT_HI(sp)
	mthi	$24
#endif
#ifdef CONFIG_32BIT
	cfi_ld	$8, PT_R8, \docfi
	cfi_ld	$9, PT_R9, \docfi
#endif
	cfi_ld	$10, PT_R10, \docfi
	cfi_ld	$11, PT_R11, \docfi
	cfi_ld	$12, PT_R12, \docfi
	cfi_ld	$13, PT_R13, \docfi
	cfi_ld	$14, PT_R14, \docfi
	cfi_ld	$15, PT_R15, \docfi
	cfi_ld	$24, PT_R24, \docfi
	.endm

	.macro	RESTORE_STATIC docfi=0
	cfi_ld	$16, PT_R16, \docfi
	cfi_ld	$17, PT_R17, \docfi
	cfi_ld	$18, PT_R18, \docfi
	cfi_ld	$19, PT_R19, \docfi
	cfi_ld	$20, PT_R20, \docfi
	cfi_ld	$21, PT_R21, \docfi
	cfi_ld	$22, PT_R22, \docfi
	cfi_ld	$23, PT_R23, \docfi
	cfi_ld	$30, PT_R30, \docfi
	.endm

	.macro	RESTORE_SP docfi=0
	cfi_ld	sp, PT_R29, \docfi
	.endm
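
/*
 * In RESTORE_SOME below, the live CU1/IM (and, on R4000-class CPUs,
 * FR) bits of c0_status are kept from the current value, while the
 * mode and exception-level bits are taken from the saved PT_STATUS.
 */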

#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)

	.macro	RESTORE_SOME docfi=0
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	li	v1, ST0_CU1 | ST0_IM
	ori	a0, STATMASK
	xori	a0, STATMASK
	mtc0	a0, CP0_STATUS
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1
	or	v0, a0
	mtc0	v0, CP0_STATUS
	cfi_ld	$31, PT_R31, \docfi
	cfi_ld	$28, PT_R28, \docfi
	cfi_ld	$25, PT_R25, \docfi
	cfi_ld	$7, PT_R7, \docfi
	cfi_ld	$6, PT_R6, \docfi
	cfi_ld	$5, PT_R5, \docfi
	cfi_ld	$4, PT_R4, \docfi
	cfi_ld	$3, PT_R3, \docfi
	cfi_ld	$2, PT_R2, \docfi
	.set	pop
	.endm

	.macro	RESTORE_SP_AND_RET docfi=0
	.set	push
	.set	noreorder
	LONG_L	k0, PT_EPC(sp)
	RESTORE_SP \docfi
	jr	k0
	 rfe
	.set	pop
	.endm
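
/*
 * Note that rfe sits in the delay slot of the jr above, so the
 * KUp/IEp status bits are popped back into KUc/IEc exactly as
 * control returns to the interrupted code.
 */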

#else

	.macro	RESTORE_SOME docfi=0
	.set	push
	.set	reorder
	.set	noat
	mfc0	a0, CP0_STATUS
	ori	a0, STATMASK
	xori	a0, STATMASK
	mtc0	a0, CP0_STATUS
	li	v1, ST0_CU1 | ST0_FR | ST0_IM
	and	a0, v1
	LONG_L	v0, PT_STATUS(sp)
	nor	v1, $0, v1
	and	v0, v1
	or	v0, a0
	mtc0	v0, CP0_STATUS
	LONG_L	v1, PT_EPC(sp)
	MTC0	v1, CP0_EPC
	cfi_ld	$31, PT_R31, \docfi
	cfi_ld	$28, PT_R28, \docfi
	cfi_ld	$25, PT_R25, \docfi
#ifdef CONFIG_64BIT
	cfi_ld	$8, PT_R8, \docfi
	cfi_ld	$9, PT_R9, \docfi
#endif
	cfi_ld	$7, PT_R7, \docfi
	cfi_ld	$6, PT_R6, \docfi
	cfi_ld	$5, PT_R5, \docfi
	cfi_ld	$4, PT_R4, \docfi
	cfi_ld	$3, PT_R3, \docfi
	cfi_ld	$2, PT_R2, \docfi
	.set	pop
	.endm

	.macro	RESTORE_SP_AND_RET docfi=0
	RESTORE_SP \docfi
#ifdef CONFIG_CPU_MIPSR6
	eretnc
#else
	.set	arch=r4000
	eret
	.set	mips0
#endif
	.endm

#endif

	.macro	RESTORE_ALL docfi=0
	RESTORE_TEMP \docfi
	RESTORE_STATIC \docfi
	RESTORE_AT \docfi
	RESTORE_SOME \docfi
	RESTORE_SP \docfi
	.endm

/*
 * Move to kernel mode and disable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
	.macro	CLI
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | STATMASK
	or	t0, t1
	xori	t0, STATMASK
	mtc0	t0, CP0_STATUS
	irq_disable_hazard
	.endm
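
/*
 * The or/xori idiom above (and in STI/KMODE below) first sets every
 * masked bit, then flips the unwanted ones back off: here all of
 * STATMASK is cleared again, leaving only ST0_CU0 set, i.e. kernel
 * mode with interrupts disabled.
 */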

/*
 * Move to kernel mode and enable interrupts.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
	.macro	STI
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | STATMASK
	or	t0, t1
	xori	t0, STATMASK & ~1
	mtc0	t0, CP0_STATUS
	irq_enable_hazard
	.endm
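
/*
 * Here the xori mask excludes bit 0 (IE), so IE is left set by the
 * preceding or and interrupts end up enabled.
 */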

/*
 * Just move to kernel mode and leave interrupts as they are. Note
 * that for the R3000 this means copying the previous enable from IEp.
 * Set the cp0 enable bit as a sign that we're running on the kernel stack.
 */
	.macro	KMODE
	mfc0	t0, CP0_STATUS
	li	t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
	andi	t2, t0, ST0_IEP
	srl	t2, 2
	or	t0, t2
#endif
	or	t0, t1
	xori	t0, STATMASK & ~1
	mtc0	t0, CP0_STATUS
	irq_disable_hazard
	.endm

#endif /* _ASM_STACKFRAME_H */