ftrace.c

/*
 * Dynamic function tracing support.
 *
 * Copyright (C) 2008 Abhishek Sagar <sagar.abhishek@gmail.com>
 * Copyright (C) 2010 Rabin Vincent <rabin@rab.in>
 *
 * For licencing details, see COPYING.
 *
 * Defines low-level handling of mcount calls when the kernel
 * is compiled with the -pg flag. When using dynamic ftrace, the
 * mcount call-sites get patched with NOP till they are enabled.
 * All code mutation routines here are called under stop_machine().
 */

#include <linux/ftrace.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/ftrace.h>
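
/*
 * With -pg, the compiler emits "push {lr}; bl __gnu_mcount_nc" at each
 * profiling call site, so the "NOP" used to disable a site is not a
 * literal no-op: it pops lr again to undo the push.  Sites built against
 * the old mcount ABI (see OLD_NOP below) have nothing extra to undo and
 * are silenced with a plain "mov r0, r0" instead.
 */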
#ifdef CONFIG_THUMB2_KERNEL
#define	NOP		0xeb04f85d	/* pop.w {lr} */
#else
#define	NOP		0xe8bd4000	/* pop {lr} */
#endif

#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR	((unsigned long) mcount)
#define OLD_FTRACE_ADDR	((unsigned long) ftrace_caller_old)

#define	OLD_NOP		0xe1a00000	/* mov r0, r0 */
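
/*
 * rec->arch.old_mcount is set once ftrace_make_nop() discovers that a
 * call site was emitted against the old mcount ABI.  These helpers pick
 * the matching NOP and redirect MCOUNT_ADDR/FTRACE_ADDR to their *_old
 * counterparts so that such sites keep being patched consistently.
 */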
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return rec->arch.old_mcount ? OLD_NOP : NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	if (!rec->arch.old_mcount)
		return addr;

	if (addr == MCOUNT_ADDR)
		addr = OLD_MCOUNT_ADDR;
	else if (addr == FTRACE_ADDR)
		addr = OLD_FTRACE_ADDR;

	return addr;
}
#else
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec)
{
	return NOP;
}

static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
{
	return addr;
}
#endif

#ifdef CONFIG_THUMB2_KERNEL
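/*
 * Encode a Thumb-2 B.W/BL instruction branching from "pc" to "addr".
 * The signed 25-bit byte offset is taken relative to pc + 4 and split
 * into the S, I1, I2, imm10 and imm11 fields, with J1 = (~I1) ^ S and
 * J2 = (~I2) ^ S as defined by the ARM ARM.  Setting bit 14 of the
 * second halfword turns the plain branch into a BL.
 */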
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
				       bool link)
{
	unsigned long s, j1, j2, i1, i2, imm10, imm11;
	unsigned long first, second;
	long offset;

	offset = (long)addr - (long)(pc + 4);
	if (offset < -16777216 || offset > 16777214) {
		WARN_ON_ONCE(1);
		return 0;
	}

	s	= (offset >> 24) & 0x1;
	i1	= (offset >> 23) & 0x1;
	i2	= (offset >> 22) & 0x1;
	imm10	= (offset >> 12) & 0x3ff;
	imm11	= (offset >>  1) & 0x7ff;

	j1 = (!i1) ^ s;
	j2 = (!i2) ^ s;

	first = 0xf000 | (s << 10) | imm10;
	second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
	if (link)
		second |= 1 << 14;

	return (second << 16) | first;
}
#else
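/*
 * Encode an ARM B/BL instruction: 0xea000000 is a B with the "always"
 * condition, bit 24 selects the link variant, and the low 24 bits hold
 * the signed word offset relative to pc + 8 (the ARM-state PC value).
 */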
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
				       bool link)
{
	unsigned long opcode = 0xea000000;
	long offset;

	if (link)
		opcode |= 1 << 24;

	offset = (long)addr - (long)(pc + 8);
	if (unlikely(offset < -33554432 || offset > 33554428)) {
		/*
		 * Can't generate branches that far (from ARM ARM). Ftrace
		 * doesn't generate branches outside of kernel text.
		 */
		WARN_ON_ONCE(1);
		return 0;
	}

	offset = (offset >> 2) & 0x00ffffff;

	return opcode | offset;
}
#endif

static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
	return ftrace_gen_branch(pc, addr, true);
}
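
/*
 * Patch a single instruction: read back what is currently at "pc",
 * refuse to touch it unless it matches the expected "old" value, write
 * the replacement and flush the icache for that range.  The
 * expected-value check is what lets callers detect old-ABI call sites.
 */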
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new)
{
	unsigned long replaced;

	if (probe_kernel_read(&replaced, (void *)pc, MCOUNT_INSN_SIZE))
		return -EFAULT;

	if (replaced != old)
		return -EINVAL;

	if (probe_kernel_write((void *)pc, &new, MCOUNT_INSN_SIZE))
		return -EPERM;

	flush_icache_range(pc, pc + MCOUNT_INSN_SIZE);

	return 0;
}
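
/*
 * Point the patched call at the ftrace_call site (inside ftrace_caller)
 * to the tracer function selected by the core.  With CONFIG_OLD_MCOUNT
 * the equivalent site in the old-ABI trampoline, ftrace_call_old, is
 * updated as well.
 */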
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long pc, old;
	unsigned long new;
	int ret;

	pc = (unsigned long)&ftrace_call;
	memcpy(&old, &ftrace_call, MCOUNT_INSN_SIZE);
	new = ftrace_call_replace(pc, (unsigned long)func);

	ret = ftrace_modify_code(pc, old, new);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret) {
		pc = (unsigned long)&ftrace_call_old;
		memcpy(&old, &ftrace_call_old, MCOUNT_INSN_SIZE);
		new = ftrace_call_replace(pc, (unsigned long)func);

		ret = ftrace_modify_code(pc, old, new);
	}
#endif

	return ret;
}
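
/*
 * Enable a call site: replace the NOP left by ftrace_make_nop() with a
 * branch-and-link to the ftrace trampoline.
 */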
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long new, old;
	unsigned long ip = rec->ip;

	old = ftrace_nop_replace(rec);
	new = ftrace_call_replace(ip, adjust_address(rec, addr));

	return ftrace_modify_code(rec->ip, old, new);
}
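
/*
 * Disable a call site: replace the branch to the trampoline with the
 * NOP.  If the expected new-ABI call is not found (ftrace_modify_code()
 * returns -EINVAL), assume the site was compiled against the old mcount
 * ABI, remember that in rec->arch.old_mcount and retry with the old-ABI
 * address and NOP.
 */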
int ftrace_make_nop(struct module *mod,
		    struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long ip = rec->ip;
	unsigned long old;
	unsigned long new;
	int ret;

	old = ftrace_call_replace(ip, adjust_address(rec, addr));
	new = ftrace_nop_replace(rec);
	ret = ftrace_modify_code(ip, old, new);

#ifdef CONFIG_OLD_MCOUNT
	if (ret == -EINVAL && addr == MCOUNT_ADDR) {
		rec->arch.old_mcount = true;

		old = ftrace_call_replace(ip, adjust_address(rec, addr));
		new = ftrace_nop_replace(rec);
		ret = ftrace_modify_code(ip, old, new);
	}
#endif

	return ret;
}
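
/*
 * No ARM-specific setup is needed at boot; the core treats the word
 * stored through @data as a return code, so write zero to report
 * success.
 */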
int __init ftrace_dyn_arch_init(void *data)
{
	*(unsigned long *)data = 0;
	return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
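/*
 * Called from the mcount trampoline with the location of the saved
 * return address.  Stash the original value, redirect the return to
 * return_to_handler and record the call on the return-trace stack;
 * back everything out if the stack is full (-EBUSY) or the entry
 * filter rejects this function.
 */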
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
			   unsigned long frame_pointer)
{
	unsigned long return_hooker = (unsigned long) &return_to_handler;
	struct ftrace_graph_ent trace;
	unsigned long old;
	int err;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = return_hooker;

	err = ftrace_push_return_trace(old, self_addr, &trace.depth,
				       frame_pointer);
	if (err == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}

#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
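
/*
 * The ftrace_graph_call site inside the trampoline starts out as a
 * "mov r0, r0" NOP.  Enabling the graph tracer swaps that NOP for a
 * plain (non-link) branch to ftrace_graph_caller; disabling swaps it
 * back.  With CONFIG_OLD_MCOUNT the old-ABI trampoline gets the same
 * treatment.
 */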
static int __ftrace_modify_caller(unsigned long *callsite,
				  void (*func) (void), bool enable)
{
	unsigned long caller_fn = (unsigned long) func;
	unsigned long pc = (unsigned long) callsite;
	unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
	unsigned long nop = 0xe1a00000;	/* mov r0, r0 */
	unsigned long old = enable ? nop : branch;
	unsigned long new = enable ? branch : nop;

	return ftrace_modify_code(pc, old, new);
}

static int ftrace_modify_graph_caller(bool enable)
{
	int ret;

	ret = __ftrace_modify_caller(&ftrace_graph_call,
				     ftrace_graph_caller,
				     enable);

#ifdef CONFIG_OLD_MCOUNT
	if (!ret)
		ret = __ftrace_modify_caller(&ftrace_graph_call_old,
					     ftrace_graph_caller_old,
					     enable);
#endif

	return ret;
}

int ftrace_enable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(true);
}

int ftrace_disable_ftrace_graph_caller(void)
{
	return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */