ex-exit.S

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/linkage.h>
#include <asm/unistd.h>
#include <asm/assembler.h>
#include <asm/nds32.h>
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
#include <asm/current.h>
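
/*
 * With CONFIG_HWZOL, the Andes hardware zero-overhead-loop state lives in
 * the $LB (loop begin), $LE (loop end) and $LC (loop count) user special
 * registers, so it is part of the per-thread context and has to be
 * restored on the way back to user mode.
 */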
#ifdef CONFIG_HWZOL
	.macro pop_zol
	mtusr	$r14, $LB
	mtusr	$r15, $LE
	mtusr	$r16, $LC
	.endm
#endif
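
/*
 * restore_user_regs_first: disable interrupts (setgie.d + isb), advance
 * $sp to skip the FUCOP_CTL area of pt_regs, then reload $r12-$r24 and
 * write the saved exception state back into the system registers: user
 * stack pointer, return address ($IPC), PSW/IPSW and the P_* copies kept
 * for nested exceptions. The final lmw.adm reloads the registers selected
 * by the #0xe enable mask (which appears to be $fp/$gp/$lp).
 */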
	.macro restore_user_regs_first
	setgie.d
	isb

	addi	$sp, $sp, FUCOP_CTL_OFFSET
	lmw.adm	$r12, [$sp], $r24, #0x0
	mtsr	$r12, $SP_USR
	mtsr	$r13, $IPC
#ifdef CONFIG_HWZOL
	pop_zol
#endif
	mtsr	$r19, $PSW
	mtsr	$r20, $IPSW
	mtsr	$r21, $P_IPSW
	mtsr	$r22, $P_IPC
	mtsr	$r23, $P_P0
	mtsr	$r24, $P_P1
	lmw.adm	$sp, [$sp], $sp, #0xe
	.endm
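
/*
 * restore_user_regs_last: pop what appears to be the saved $osp; if it is
 * non-zero, cmovn makes it the new stack pointer before iret returns to
 * the interrupted context. The nop after iret is never reached on a
 * successful return and looks like pipeline padding.
 */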
	.macro restore_user_regs_last
	pop	$p0
	cmovn	$sp, $p0, $p0

	iret
	nop
	.endm
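
/*
 * restore_user_regs reloads the full $r0-$r25 set. fast_restore_user_regs
 * is the syscall fast path: $r0 already holds the syscall return value,
 * so the reload starts at $r1 and the OSP_OFFSET-4 adjustment accounts
 * for the one word that is not popped.
 */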
	.macro restore_user_regs
	restore_user_regs_first
	lmw.adm	$r0, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET
	restore_user_regs_last
	.endm
	.macro fast_restore_user_regs
	restore_user_regs_first
	lmw.adm	$r1, [$sp], $r25, #0x0
	addi	$sp, $sp, OSP_OFFSET-4
	restore_user_regs_last
	.endm
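
/*
 * On a preemptible kernel, preempt_stop is a no-op and kernel-mode
 * returns go through resume_kernel below. On a non-preemptible kernel
 * there is no resume_kernel, so it is aliased to no_work_pending and
 * preempt_stop simply masks interrupts (setgie.d + isb).
 */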
#ifdef CONFIG_PREEMPT
	.macro preempt_stop
	.endm
#else
	.macro preempt_stop
	setgie.d
	isb
	.endm
#define resume_kernel	no_work_pending
#endif
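
/*
 * Common exit paths. ret_from_exception differs from ret_from_intr only
 * in running preempt_stop first (masking interrupts on non-preemptible
 * kernels) before both fall into the mode check below.
 */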
ENTRY(ret_from_exception)
	preempt_stop
ENTRY(ret_from_intr)

/*
 * Judge kernel or user mode: a non-zero interruption level
 * (PSW_mskINTL) in the saved IPSW means we interrupted kernel context.
 */
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, resume_kernel		! done with iret
	j	resume_userspace
/*
 * This is the fast syscall return path. We do as little work as possible
 * here; $r0 is only saved back onto the kernel stack if the slow path is
 * taken (see fast_work_pending).
 * Fixed register usage: tsk = $r25, syscall # = $r7, syscall table
 * pointer = $r8.
 */
ENTRY(ret_fast_syscall)
	gie_disable
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, fast_work_pending
	fast_restore_user_regs			! iret
/*
 * Ok, we need to do extra processing: enter the slow path for returning
 * from a syscall while work is pending.
 */
fast_work_pending:
	swi	$r0, [$sp+(#R0_OFFSET)]		! save $r0: this is what differs from ret_from_exception
work_pending:
	andi	$p1, $r1, #_TIF_NEED_RESCHED
	bnez	$p1, work_resched
	andi	$p1, $r1, #_TIF_SIGPENDING|#_TIF_NOTIFY_RESUME
	beqz	$p1, no_work_pending
	move	$r0, $sp			! 'regs'
	gie_enable
	bal	do_notify_resume
	b	ret_slow_syscall
work_resched:
	bal	schedule			! reschedule, then return to user mode
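	! when schedule() returns, fall through into ret_slow_syscall below
	! so the work flags are re-checked before the final return to user.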
/*
 * "slow" syscall return path.
 */
ENTRY(resume_userspace)
ENTRY(ret_slow_syscall)
	gie_disable
	lwi	$p0, [$sp+(#IPSW_OFFSET)]	! Check if in nested interrupt
	andi	$p0, $p0, #PSW_mskINTL
	bnez	$p0, no_work_pending		! done with iret
	lwi	$r1, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $r1, #_TIF_WORK_MASK
	bnez	$p1, work_pending		! handle work_resched, sig_pend

no_work_pending:
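/*
 * With CONFIG_TRACE_IRQFLAGS, report the interrupt state that iret will
 * restore: $r9 defaults to __trace_hardirqs_on, and cmovz swaps in
 * __trace_hardirqs_off when bit 0 (GIE) of the saved IPSW is clear; jral
 * then calls the chosen stub.
 */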
#ifdef CONFIG_TRACE_IRQFLAGS
	lwi	$p0, [$sp+(#IPSW_OFFSET)]
	andi	$p0, $p0, #0x1
	la	$r10, __trace_hardirqs_off
	la	$r9, __trace_hardirqs_on
	cmovz	$r9, $p0, $r10
	jral	$r9
#endif
	restore_user_regs			! return to user via iret
/*
 * preemptible kernel
 */
#ifdef CONFIG_PREEMPT
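/*
 * Preempt only when the preempt count is zero, TIF_NEED_RESCHED is set,
 * and the interrupted kernel context had interrupts enabled (bit 0 of
 * the saved IPSW); otherwise return with no work done.
 */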
resume_kernel:
	gie_disable
	lwi	$t0, [tsk+#TSK_TI_PREEMPT]
	bnez	$t0, no_work_pending

need_resched:
	lwi	$t0, [tsk+#TSK_TI_FLAGS]
	andi	$p1, $t0, #_TIF_NEED_RESCHED
	beqz	$p1, no_work_pending

	lwi	$t0, [$sp+(#IPSW_OFFSET)]	! Interrupts off?
	andi	$t0, $t0, #1
	beqz	$t0, no_work_pending

	jal	preempt_schedule_irq
	b	need_resched
#endif
/*
 * This is how we return from a fork.
 */
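/*
 * The kernel-thread function is left in $r6 ($r6 == 0 for a user fork)
 * and its argument in $r7, presumably set up during copy_thread();
 * schedule_tail() must run first to finish the context switch started in
 * the parent.
 */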
ENTRY(ret_from_fork)
	bal	schedule_tail
	beqz	$r6, 1f				! r6 stores fn for kernel thread
	move	$r0, $r7			! prepare kernel thread arg
	jral	$r6
1:
	lwi	$r1, [tsk+#TSK_TI_FLAGS]	! check for syscall tracing
	andi	$p1, $r1, #_TIF_WORK_SYSCALL_LEAVE	! are we tracing syscalls?
	beqz	$p1, ret_slow_syscall
	move	$r0, $sp
	bal	syscall_trace_leave
	b	ret_slow_syscall