0005-cortex-m0-Use-assembly-exception-handlers-for-task-s.patch 8.0 KB

From 27501308493bf2adadfc3b133fd1d6f4b4feec12 Mon Sep 17 00:00:00 2001
From: Paul Kocialkowski <contact@paulk.fr>
Date: Sat, 23 Jul 2016 14:17:32 +0200
Subject: [PATCH] cortex-m0: Use assembly exception handlers for task switching

The way Cortex processors handle exceptions allows writing exception
routines directly in C, as return from exception is handled by providing
a special value for the link register.
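
For illustration, a plain C handler relies on that mechanism roughly as
follows (a sketch of typical compiler output, not the exact code):

    handler:
        push {lr}       @ lr holds an EXC_RETURN value (e.g. 0xfffffffd),
                        @ not a normal return address
        ...             @ handler body
        pop {pc}        @ loading EXC_RETURN into pc performs the
                        @ exception return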

However, it is not safe to do this when doing context switching. In
particular, C handlers may push some general-purpose registers that
are used by the handler and pop them later, even when a context switch
has happened in the meantime. While the processor will restore {r0-r3}
from the stack when returning from an exception, the C handler code
may push, use and pop another register, such as r4.

It turns out that GCC 4.8 would generally only use r3 in svc_handler and
pendsv_handler, but newer versions tend to use r4, thus clobbering the r4
value that was restored by the context switch and leading to a fault when
r4 is used by the task code.
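
Roughly, the failure mode with a C handler looks like this (a sketch of
what a newer GCC may generate for svc_handler, not the exact output):

    svc_handler:
        push {r4, lr}       @ compiler spills r4 on the handler (main) stack
        ...                 @ handler body, context switch via __switchto:
                            @ r4-r11 and psp now belong to the next task
        pop {r4, pc}        @ restores the previous task's r4 over the value
                            @ just set up for the next task, then returns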

An occurrence of this behaviour takes place with GCC > 4.8 in __wait_evt,
where "me" is stored in r4, which gets clobbered after an exception
triggers pendsv_handler. The exception handler uses r4 internally, does
a context switch and then restores the previous value of r4, which is not
part of the exception frame restored by the processor, thus clobbering r4.

This ends up with the following assertion failure:
'tskid < TASK_ID_COUNT' in timer_cancel() at common/timer.c:137

For this reason, it is safer to have assembly routines for exception
handlers that do context switching.

BUG=chromium:631514
BRANCH=None
TEST=Build and run speedy EC with a recent GCC version

Change-Id: Ib068bc12ce2204aee3e0f563efcb94f15aa87013
Signed-off-by: Paul Kocialkowski <contact@paulk.fr>
---
diff --git a/core/cortex-m0/switch.S b/core/cortex-m0/switch.S
index 95ea29e..d4b47cd 100644
--- a/core/cortex-m0/switch.S
+++ b/core/cortex-m0/switch.S
@@ -7,55 +7,14 @@
 #include "config.h"
+#define CPU_SCB_ICSR 0xe000ed04
+
 .text
 .syntax unified
 .code 16
 /**
- * Task context switching
- *
- * Change the task scheduled after returning from the exception.
- *
- * Save the registers of the current task below the exception context on
- * its task, then restore the live registers of the next task and set the
- * process stack pointer to the new stack.
- *
- * r0: pointer to the task to switch from
- * r1: pointer to the task to switch to
- *
- * must be called from interrupt context
- *
- * the structure of the saved context on the stack is :
- * r8, r9, r10, r11, r4, r5, r6, r7, r0, r1, r2, r3, r12, lr, pc, psr
- * additional registers <|> exception frame
- */
-.global __switchto
-.thumb_func
-__switchto:
- mrs r2, psp @ get the task stack where the context has been saved
- mov r3, sp
- mov sp, r2
- push {r4-r7} @ save additional r4-r7 in the task stack
- mov r4, r8
- mov r5, r9
- mov r6, r10
- mov r7, r11
- push {r4-r7} @ save additional r8-r11 in the task stack
- mov r2, sp @ prepare to save former task stack pointer
- mov sp, r3 @ restore system stack pointer
- str r2, [r0] @ save the task stack pointer in its context
- ldr r2, [r1] @ get the new scheduled task stack pointer
- ldmia r2!, {r4-r7} @ restore r8-r11 for the next task context
- mov r8, r4
- mov r9, r5
- mov r10, r6
- mov r11, r7
- ldmia r2!, {r4-r7} @ restore r4-r7 for the next task context
- msr psp, r2 @ set the process stack pointer to exception context
- bx lr @ return from exception
-
-/**
  * Start the task scheduling. r0 is a pointer to task_stack_ready, which is
  * set to 1 after the task stack is set up.
  */
@@ -79,3 +38,77 @@
 movs r0, #1 @ set to EC_ERROR_UNKNOWN
 bx lr
+/**
+ * SVC exception handler
+ */
+.global svc_handler
+.thumb_func
+svc_handler:
+ push {lr} @ save link register
+ bl __svc_handler @ call svc handler helper
+ ldr r3,=current_task @ load the current task's address
+ ldr r1, [r3] @ load the current task
+ cmp r0, r1 @ compare with previous task returned by helper
+ beq svc_handler_return @ return if they are the same
+ /* continue to __switchto to switch to the new task */
+
+/**
+ * Task context switching
+ *
+ * Change the task scheduled after returning from the exception.
+ *
+ * Save the registers of the current task below the exception context on
+ * its task, then restore the live registers of the next task and set the
+ * process stack pointer to the new stack.
+ *
+ * r0: pointer to the task to switch from
+ * r1: pointer to the task to switch to
+ *
+ * must be called from interrupt context
+ *
+ * the structure of the saved context on the stack is :
+ * r8, r9, r10, r11, r4, r5, r6, r7, r0, r1, r2, r3, r12, lr, pc, psr
+ * additional registers <|> exception frame
+ */
+__switchto:
+ mrs r2, psp @ get the task stack where the context has been saved
+ mov r3, sp
+ mov sp, r2
+ push {r4-r7} @ save additional r4-r7 in the task stack
+ mov r4, r8
+ mov r5, r9
+ mov r6, r10
+ mov r7, r11
+ push {r4-r7} @ save additional r8-r11 in the task stack
+ mov r2, sp @ prepare to save former task stack pointer
+ mov sp, r3 @ restore system stack pointer
+ str r2, [r0] @ save the task stack pointer in its context
+ ldr r2, [r1] @ get the new scheduled task stack pointer
+ ldmia r2!, {r4-r7} @ restore r8-r11 for the next task context
+ mov r8, r4
+ mov r9, r5
+ mov r10, r6
+ mov r11, r7
+ ldmia r2!, {r4-r7} @ restore r4-r7 for the next task context
+ msr psp, r2 @ set the process stack pointer to exception context
+
+svc_handler_return:
+ pop {pc} @ return from exception or return to caller
+
+/**
+ * PendSV exception handler
+ */
+.global pendsv_handler
+.thumb_func
+pendsv_handler:
+ push {lr} @ save link register
+ ldr r0, =#CPU_SCB_ICSR @ load CPU_SCB_ICSR's address
+ movs r1, #1 @ prepare left shift (1 << 27)
+ lsls r1, #27 @ shift the bit
+ str r1, [r0] @ clear pending flag
+ cpsid i @ ensure we have priority 0 during re-scheduling
+ movs r1, #0 @ desched nothing
+ movs r0, #0 @ resched nothing
+ bl svc_handler @ re-schedule the highest priority task
+ cpsie i @ leave priority 0
+ pop {pc} @ return from exception
diff --git a/core/cortex-m0/task.c b/core/cortex-m0/task.c
index 5d219a5..0261261 100644
--- a/core/cortex-m0/task.c
+++ b/core/cortex-m0/task.c
@@ -59,7 +59,6 @@
 static uint32_t irq_dist[CONFIG_IRQ_COUNT]; /* Distribution of IRQ calls */
 #endif
-extern void __switchto(task_ *from, task_ *to);
 extern int __task_start(int *task_stack_ready);
 #ifndef CONFIG_LOW_POWER_IDLE
@@ -124,7 +123,7 @@
 /* Reserve space to discard context on first context switch. */
 uint32_t scratchpad[17];
-static task_ *current_task = (task_ *)scratchpad;
+task_ *current_task = (task_ *)scratchpad;
 /*
  * Bitmap of all tasks ready to be run.
@@ -254,18 +253,6 @@
 return current;
 }
-void svc_handler(int desched, task_id_t resched)
-{
- /*
- * The layout of the this routine (and the __svc_handler companion one)
- * ensures that we are getting the right tail call optimization from
- * the compiler.
- */
- task_ *prev = __svc_handler(desched, resched);
- if (current_task != prev)
- __switchto(prev, current_task);
-}
-
 void __schedule(int desched, int resched)
 {
 register int p0 asm("r0") = desched;
@@ -274,18 +261,6 @@
 asm("svc 0" : : "r"(p0), "r"(p1));
 }
-void pendsv_handler(void)
-{
- /* Clear pending flag */
- CPU_SCB_ICSR = (1 << 27);
-
- /* ensure we have priority 0 during re-scheduling */
- __asm__ __volatile__("cpsid i");
- /* re-schedule the highest priority task */
- svc_handler(0, 0);
- __asm__ __volatile__("cpsie i");
-}
-
 #ifdef CONFIG_TASK_PROFILING
 void task_start_irq_handler(void *excep_return)
 {