/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licensing details, see COPYING.
 *
 * Defines the low-level handling of mcount calls when the kernel is
 * compiled with the -pg flag.  With dynamic ftrace, each mcount call
 * site is patched with a NOP until it is enabled.  All code mutation
 * routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/stop_machine.h>

#include <asm/cacheflush.h>
#include <asm/opcodes.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
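
/*
 * The "NOP" used to disable a call site is not a literal no-op.  With
 * the __gnu_mcount_nc ABI, the compiler emits a sequence along the
 * lines of (illustrative; the exact code depends on the toolchain):
 *
 *	push	{lr}
 *	bl	__gnu_mcount_nc
 *
 * Patching out only the bl would leave the saved lr on the stack, so
 * the replacement instruction must undo the push.
 */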
#ifdef CONFIG_THUMB2_KERNEL
#define NOP		0xf85deb04	/* pop.w {lr} */
#else
#define NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
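/*
 * Kernel text is mapped read-only and may be executed concurrently by
 * other CPUs, so it cannot simply be rewritten in place.  The ftrace
 * core funnels all batched updates through arch_ftrace_update_code(),
 * which parks every other CPU via stop_machine() while
 * __ftrace_modify_code() temporarily makes the text writable and
 * applies the changes.
 */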
static int __ftrace_modify_code(void *data)
{
        int *command = data;

        set_kernel_text_rw();
        ftrace_modify_all_code(*command);
        set_kernel_text_ro();

        return 0;
}

void arch_ftrace_update_code(int command)
{
        stop_machine(__ftrace_modify_code, &command, NULL);
}

#ifdef CONFIG_OLD_MCOUNT
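/*
 * Old toolchains emit calls to "mcount" while newer ones use the
 * __gnu_mcount_nc ABI, and a CONFIG_OLD_MCOUNT kernel supports both.
 * rec->arch.old_mcount records which ABI a call site uses (detected in
 * ftrace_make_nop() below); adjust_address() redirects old-ABI sites
 * to the matching old trampolines.
 */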
#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)

#define OLD_NOP		0xe1a00000	/* mov r0, r0 */

static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
        return rec->arch.old_mcount ? OLD_NOP : NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
        if (!rec->arch.old_mcount)
                return addr;

        if (addr == MCOUNT_ADDR)
                addr = OLD_MCOUNT_ADDR;
        else if (addr == FTRACE_ADDR)
                addr = OLD_FTRACE_ADDR;

        return addr;
}
#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
        return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
        return addr;
}
#endif
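
/*
 * These hooks are invoked by the ftrace core around a batch of
 * call-site updates: module text is made writable beforehand and
 * write-protected again afterwards.  The kernel image itself is
 * handled in __ftrace_modify_code() above.
 */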
int ftrace_arch_code_modify_prepare(void)
{
        set_all_modules_text_rw();
        return 0;
}

int ftrace_arch_code_modify_post_process(void)
{
        set_all_modules_text_ro();

        /* Make sure any TLB misses during machine stop are cleared. */
        flush_tlb_all();
        return 0;
}
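
/*
 * Encode a "bl <addr>" (branch with link) instruction for the call
 * site at pc; arm_gen_branch_link() is expected to return 0 when the
 * target is out of branch range.
 */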
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
        return arm_gen_branch_link(pc, addr);
}
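
/*
 * Patch a single instruction.  When validate is set, first check that
 * the site still holds the expected old instruction, catching stale
 * bookkeeping before live text is corrupted.  The opcodes are
 * converted to memory byte order up front, since e.g. Thumb-2 stores
 * a 32-bit instruction as two halfwords.
 */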
static int ftrace_modify_code(unsigned long pc, unsigned long old,
                              unsigned long new, bool validate)
{
        unsigned long replaced;

        if (IS_ENABLED(CONFIG_THUMB2_KERNEL)) {
                old = __opcode_to_mem_thumb32(old);
                new = __opcode_to_mem_thumb32(new);
        } else {
                old = __opcode_to_mem_arm(old);
                new = __opcode_to_mem_arm(new);
        }

        if (validate) {
                if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
                        return -EFAULT;

                if (replaced != old)
                        return -EINVAL;
        }

        if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
                return -EPERM;

        flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

        return 0;
}
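
/*
 * Point the patchable call inside the ftrace_caller trampoline (the
 * ftrace_call slot) at the new tracer function.  validate is false
 * because the slot legitimately holds whichever tracer was installed
 * previously.
 */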
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long pc;
        unsigned long new;
        int ret;

        pc = (unsigned long)&ftrace_call;
        new = ftrace_call_replace(pc, (unsigned long)func);

        ret = ftrace_modify_code(pc, 0, new, false);

#ifdef CONFIG_OLD_MCOUNT
        if (!ret) {
                pc = (unsigned long)&ftrace_call_old;
                new = ftrace_call_replace(pc, (unsigned long)func);

                ret = ftrace_modify_code(pc, 0, new, false);
        }
#endif

        return ret;
}

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long new, old;
        unsigned long ip = rec->ip;

        old = ftrace_nop_replace(rec);
        new = ftrace_call_replace(ip, adjust_address(rec, addr));

        return ftrace_modify_code(ip, old, new, true);
}
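
/*
 * When disabling a call site, first assume the new __gnu_mcount_nc
 * ABI.  If validation fails with -EINVAL, the site presumably still
 * contains an old-ABI mcount call: mark the record and retry with the
 * old address and the old-style NOP.
 */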
int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned long old;
        unsigned long new;
        int ret;

        old = ftrace_call_replace(ip, adjust_address(rec, addr));
        new = ftrace_nop_replace(rec);
        ret = ftrace_modify_code(ip, old, new, true);

#ifdef CONFIG_OLD_MCOUNT
        if (ret == -EINVAL && addr == MCOUNT_ADDR) {
                rec->arch.old_mcount = true;

                old = ftrace_call_replace(ip, adjust_address(rec, addr));
                new = ftrace_nop_replace(rec);
                ret = ftrace_modify_code(ip, old, new, true);
        }
#endif

        return ret;
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
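/*
 * Called from the mcount trampolines with the location of the saved
 * lr on the caller's stack.  The real return address is pushed onto
 * the per-task return stack and replaced with return_to_handler, so
 * the traced function "returns" into the graph tracer.  If the entry
 * is filtered out or the return stack is full (-EBUSY), the original
 * address is restored and the function is left untraced.
 */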
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
                           unsigned long frame_pointer)
{
        unsigned long return_hooker = (unsigned long) &return_to_handler;
        struct ftrace_graph_ent trace;
        unsigned long old;
        int err;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                return;

        old = *parent;
        *parent = return_hooker;

        trace.func = self_addr;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace)) {
                *parent = old;
                return;
        }

        err = ftrace_push_return_trace(old, self_addr, &trace.depth,
                                       frame_pointer, NULL);
        if (err == -EBUSY) {
                *parent = old;
                return;
        }
}

#ifdef CONFIG_DYNAMIC_FTRACE
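/*
 * The tracing trampolines contain a dedicated site (ftrace_graph_call)
 * that holds either a branch to ftrace_graph_caller or a "mov r0, r0"
 * NOP.  Enabling or disabling the graph tracer amounts to flipping
 * that single instruction, validating the old one as usual.
 */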
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);

static int __ftrace_modify_caller(unsigned long *callsite,
                                  void (*func) (void), bool enable)
{
        unsigned long caller_fn = (unsigned long) func;
        unsigned long pc = (unsigned long) callsite;
        unsigned long branch = arm_gen_branch(pc, caller_fn);
        unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
        unsigned long old = enable ? nop : branch;
        unsigned long new = enable ? branch : nop;

        return ftrace_modify_code(pc, old, new, true);
}

static int ftrace_modify_graph_caller(bool enable)
{
        int ret;

        ret = __ftrace_modify_caller(&ftrace_graph_call,
                                     ftrace_graph_caller,
                                     enable);

#ifdef CONFIG_OLD_MCOUNT
        if (!ret)
                ret = __ftrace_modify_caller(&ftrace_graph_call_old,
                                             ftrace_graph_caller_old,
                                             enable);
#endif

        return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */