hw_breakpoint.c

/*
 * Xtensa hardware breakpoints/watchpoints handling functions
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2016 Cadence Design Systems Inc.
 */

#include <linux/hw_breakpoint.h>
#include <linux/log2.h>
#include <linux/percpu.h>
#include <linux/perf_event.h>
#include <variant/core.h>

/* Breakpoint currently in use for each IBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[XCHAL_NUM_IBREAK]);

/* Watchpoint currently in use for each DBREAKA. */
static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[XCHAL_NUM_DBREAK]);

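/*
 * Report how many hardware breakpoint slots the core provides for the
 * given type: instruction breakpoints (IBREAK) or data watchpoints (DBREAK).
 */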
int hw_breakpoint_slots(int type)
{
        switch (type) {
        case TYPE_INST:
                return XCHAL_NUM_IBREAK;
        case TYPE_DATA:
                return XCHAL_NUM_DBREAK;
        default:
                pr_warn("unknown slot type: %d\n", type);
                return 0;
        }
}

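/*
 * Return true if the breakpoint's address range lies entirely in kernel
 * space, i.e. at or above TASK_SIZE.
 */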
int arch_check_bp_in_kernelspace(struct perf_event *bp)
{
        unsigned int len;
        unsigned long va;
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        va = info->address;
        len = bp->attr.bp_len;

        return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
}

/*
 * Construct an arch_hw_breakpoint from a perf_event.
 */
static int arch_build_bp_info(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);

        /* Type */
        switch (bp->attr.bp_type) {
        case HW_BREAKPOINT_X:
                info->type = XTENSA_BREAKPOINT_EXECUTE;
                break;
        case HW_BREAKPOINT_R:
                info->type = XTENSA_BREAKPOINT_LOAD;
                break;
        case HW_BREAKPOINT_W:
                info->type = XTENSA_BREAKPOINT_STORE;
                break;
        case HW_BREAKPOINT_RW:
                info->type = XTENSA_BREAKPOINT_LOAD | XTENSA_BREAKPOINT_STORE;
                break;
        default:
                return -EINVAL;
        }

        /* Len */
        info->len = bp->attr.bp_len;
        if (info->len < 1 || info->len > 64 || !is_power_of_2(info->len))
                return -EINVAL;

        /* Address */
        info->address = bp->attr.bp_addr;
        if (info->address & (info->len - 1))
                return -EINVAL;

        return 0;
}

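/*
 * Validate the arch-specific HW Breakpoint register settings.
 */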
int arch_validate_hwbkpt_settings(struct perf_event *bp)
{
        int ret;

        /* Build the arch_hw_breakpoint. */
        ret = arch_build_bp_info(bp);
        return ret;
}

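/*
 * Stub for the generic hw_breakpoint exception notifier; breakpoint hits
 * are dispatched from check_hw_breakpoint() instead.
 */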
int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
                                    unsigned long val, void *data)
{
        return NOTIFY_DONE;
}

static void xtensa_wsr(unsigned long v, u8 sr)
{
        /* We don't have an indexed wsr, and creating the instruction
         * dynamically doesn't seem worth it given how small XCHAL_NUM_IBREAK
         * and XCHAL_NUM_DBREAK are. Thus the switch. In case the build breaks
         * here, the switch below needs to be extended.
         */
        BUILD_BUG_ON(XCHAL_NUM_IBREAK > 2);
        BUILD_BUG_ON(XCHAL_NUM_DBREAK > 2);

        switch (sr) {
#if XCHAL_NUM_IBREAK > 0
        case SREG_IBREAKA + 0:
                WSR(v, SREG_IBREAKA + 0);
                break;
#endif
#if XCHAL_NUM_IBREAK > 1
        case SREG_IBREAKA + 1:
                WSR(v, SREG_IBREAKA + 1);
                break;
#endif

#if XCHAL_NUM_DBREAK > 0
        case SREG_DBREAKA + 0:
                WSR(v, SREG_DBREAKA + 0);
                break;
        case SREG_DBREAKC + 0:
                WSR(v, SREG_DBREAKC + 0);
                break;
#endif
#if XCHAL_NUM_DBREAK > 1
        case SREG_DBREAKA + 1:
                WSR(v, SREG_DBREAKA + 1);
                break;
        case SREG_DBREAKC + 1:
                WSR(v, SREG_DBREAKC + 1);
                break;
#endif
        }
}

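/*
 * Claim the first free entry in a per-CPU slot array for @bp and return its
 * index, or -EBUSY if every slot is already taken.
 */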
static int alloc_slot(struct perf_event **slot, size_t n,
                      struct perf_event *bp)
{
        size_t i;

        for (i = 0; i < n; ++i) {
                if (!slot[i]) {
                        slot[i] = bp;
                        return i;
                }
        }
        return -EBUSY;
}

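/*
 * Program IBREAKA[reg] with the breakpoint address and set the corresponding
 * enable bit in IBREAKENABLE.
 */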
static void set_ibreak_regs(int reg, struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        unsigned long ibreakenable;

        xtensa_wsr(info->address, SREG_IBREAKA + reg);
        RSR(ibreakenable, SREG_IBREAKENABLE);
        WSR(ibreakenable | (1 << reg), SREG_IBREAKENABLE);
}

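/*
 * Program a data breakpoint register pair: DBREAKA[reg] gets the watched
 * address, DBREAKC[reg] gets the length-derived address mask plus the
 * load/store enable bits.
 */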
static void set_dbreak_regs(int reg, struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        unsigned long dbreakc = DBREAKC_MASK_MASK & -info->len;

        if (info->type & XTENSA_BREAKPOINT_LOAD)
                dbreakc |= DBREAKC_LOAD_MASK;
        if (info->type & XTENSA_BREAKPOINT_STORE)
                dbreakc |= DBREAKC_STOR_MASK;

        xtensa_wsr(info->address, SREG_DBREAKA + reg);
        xtensa_wsr(dbreakc, SREG_DBREAKC + reg);
}

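/*
 * Install a perf breakpoint into a free hardware slot on this CPU:
 * IBREAK registers for execute breakpoints, DBREAK registers for
 * load/store watchpoints.
 */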
int arch_install_hw_breakpoint(struct perf_event *bp)
{
        int i;

        if (counter_arch_bp(bp)->type == XTENSA_BREAKPOINT_EXECUTE) {
                /* Breakpoint */
                i = alloc_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
                if (i < 0)
                        return i;
                set_ibreak_regs(i, bp);
        } else {
                /* Watchpoint */
                i = alloc_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
                if (i < 0)
                        return i;
                set_dbreak_regs(i, bp);
        }
        return 0;
}

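/*
 * Release the slot array entry occupied by @bp and return its index,
 * or -EBUSY if @bp is not installed on this CPU.
 */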
static int free_slot(struct perf_event **slot, size_t n,
                     struct perf_event *bp)
{
        size_t i;

        for (i = 0; i < n; ++i) {
                if (slot[i] == bp) {
                        slot[i] = NULL;
                        return i;
                }
        }
        return -EBUSY;
}

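/*
 * Remove @bp from its hardware slot: clear its IBREAKENABLE bit for an
 * execute breakpoint, or zero its DBREAKC register for a watchpoint.
 */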
void arch_uninstall_hw_breakpoint(struct perf_event *bp)
{
        struct arch_hw_breakpoint *info = counter_arch_bp(bp);
        int i;

        if (info->type == XTENSA_BREAKPOINT_EXECUTE) {
                unsigned long ibreakenable;

                /* Breakpoint */
                i = free_slot(this_cpu_ptr(bp_on_reg), XCHAL_NUM_IBREAK, bp);
                if (i >= 0) {
                        RSR(ibreakenable, SREG_IBREAKENABLE);
                        WSR(ibreakenable & ~(1 << i), SREG_IBREAKENABLE);
                }
        } else {
                /* Watchpoint */
                i = free_slot(this_cpu_ptr(wp_on_reg), XCHAL_NUM_DBREAK, bp);
                if (i >= 0)
                        xtensa_wsr(0, SREG_DBREAKC + i);
        }
}

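/* Nothing to do for the perf PMU read callback. */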
void hw_breakpoint_pmu_read(struct perf_event *bp)
{
}

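/*
 * Unregister all breakpoints and watchpoints that ptrace attached to
 * this task.
 */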
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        int i;
        struct thread_struct *t = &tsk->thread;

        for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
                if (t->ptrace_bp[i]) {
                        unregister_hw_breakpoint(t->ptrace_bp[i]);
                        t->ptrace_bp[i] = NULL;
                }
        }
        for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
                if (t->ptrace_wp[i]) {
                        unregister_hw_breakpoint(t->ptrace_wp[i]);
                        t->ptrace_wp[i] = NULL;
                }
        }
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
        memset(tsk->thread.ptrace_bp, 0, sizeof(tsk->thread.ptrace_bp));
        memset(tsk->thread.ptrace_wp, 0, sizeof(tsk->thread.ptrace_wp));
}

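/*
 * Re-arm any data breakpoints on this CPU that check_hw_breakpoint()
 * temporarily disabled after a hit in kernel mode, and clear
 * TIF_DB_DISABLED.
 */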
void restore_dbreak(void)
{
        int i;

        for (i = 0; i < XCHAL_NUM_DBREAK; ++i) {
                struct perf_event *bp = this_cpu_ptr(wp_on_reg)[i];

                if (bp)
                        set_dbreak_regs(i, bp);
        }
        clear_thread_flag(TIF_DB_DISABLED);
}

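/*
 * Debug exception handling: decode DEBUGCAUSE to find out whether an
 * instruction breakpoint or a data breakpoint fired, deliver the matching
 * perf event, and return 0.  Data breakpoints hit in kernel mode are
 * temporarily disabled and re-armed later by restore_dbreak().  Returns
 * -ENOENT if the exception was not caused by a hardware breakpoint.
 */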
int check_hw_breakpoint(struct pt_regs *regs)
{
        if (regs->debugcause & BIT(DEBUGCAUSE_IBREAK_BIT)) {
                int i;
                struct perf_event **bp = this_cpu_ptr(bp_on_reg);

                for (i = 0; i < XCHAL_NUM_IBREAK; ++i) {
                        if (bp[i] && !bp[i]->attr.disabled &&
                            regs->pc == bp[i]->attr.bp_addr)
                                perf_bp_event(bp[i], regs);
                }
                return 0;
        } else if (regs->debugcause & BIT(DEBUGCAUSE_DBREAK_BIT)) {
                struct perf_event **bp = this_cpu_ptr(wp_on_reg);
                int dbnum = (regs->debugcause & DEBUGCAUSE_DBNUM_MASK) >>
                        DEBUGCAUSE_DBNUM_SHIFT;

                if (dbnum < XCHAL_NUM_DBREAK && bp[dbnum]) {
                        if (user_mode(regs)) {
                                perf_bp_event(bp[dbnum], regs);
                        } else {
                                set_thread_flag(TIF_DB_DISABLED);
                                xtensa_wsr(0, SREG_DBREAKC + dbnum);
                        }
                } else {
                        WARN_ONCE(1,
                                  "Wrong/unconfigured DBNUM reported in DEBUGCAUSE: %d\n",
                                  dbnum);
                }
                return 0;
        }
        return -ENOENT;
}