ftrace.c
/*
 * Code for tracing calls in Linux kernel.
 * Copyright (C) 2009 Helge Deller <deller@gmx.de>
 *
 * based on code for x86 which is:
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * future possible enhancements:
 *	- add CONFIG_DYNAMIC_FTRACE
 *	- add CONFIG_STACK_TRACER
 */

#include <linux/init.h>
#include <linux/ftrace.h>

#include <asm/sections.h>
#include <asm/ftrace.h>

#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* Add a function return address to the trace stack on thread info. */
static int push_return_trace(unsigned long ret, unsigned long long time,
			     unsigned long func, int *depth)
{
	int index;

	if (!current->ret_stack)
		return -EBUSY;

	/* The return trace stack is full */
	if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
		atomic_inc(&current->trace_overrun);
		return -EBUSY;
	}

	index = ++current->curr_ret_stack;
	barrier();
	current->ret_stack[index].ret = ret;
	current->ret_stack[index].func = func;
	current->ret_stack[index].calltime = time;
	*depth = index;

	return 0;
}

/* Retrieve a function return address from the trace stack on thread info. */
static void pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret)
{
	int index;

	index = current->curr_ret_stack;

	if (unlikely(index < 0)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic, otherwise we have nowhere to go */
		*ret = (unsigned long)
			dereference_function_descriptor(&panic);
		return;
	}

	*ret = current->ret_stack[index].ret;
	trace->func = current->ret_stack[index].func;
	trace->calltime = current->ret_stack[index].calltime;
	trace->overrun = atomic_read(&current->trace_overrun);
	trace->depth = index;
	barrier();
	current->curr_ret_stack--;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long retval0,
				       unsigned long retval1)
{
	struct ftrace_graph_ret trace;
	unsigned long ret;

	pop_return_trace(&trace, &ret);
	trace.rettime = local_clock();
	ftrace_graph_return(&trace);

	if (unlikely(!ret)) {
		ftrace_graph_stop();
		WARN_ON(1);
		/* Might as well panic. What else to do? */
		ret = (unsigned long)
			dereference_function_descriptor(&panic);
	}

	/* HACK: we hand over the old function's return values
	   in %r23 and %r24. Assembly in entry.S will take care of them
	   and move them to their final registers %ret0 and %ret1 */
	asm( "copy %0, %%r23 \n\t"
	     "copy %1, %%r24 \n" : : "r" (retval0), "r" (retval1) );

	return ret;
}

/*
 * Hook the return address and push it onto the stack of return addresses
 * in the current thread info.
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	unsigned long long calltime;
	struct ftrace_graph_ent trace;

	if (unlikely(ftrace_graph_is_dead()))
		return;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	old = *parent;
	*parent = (unsigned long)
		  dereference_function_descriptor(&return_to_handler);

	if (unlikely(!__kernel_text_address(old))) {
		ftrace_graph_stop();
		*parent = old;
		WARN_ON(1);
		return;
	}

	calltime = local_clock();

	if (push_return_trace(old, calltime,
			      self_addr, &trace.depth) == -EBUSY) {
		*parent = old;
		return;
	}

	trace.func = self_addr;

	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

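/*
 * C part of the ftrace entry point, called from the assembler code in
 * entry.S with the traced function's return address (parent), the traced
 * function's own address (self_addr) and the original stack pointer
 * (org_sp_gr3).  It dispatches to the registered function tracer and, when
 * the function graph tracer is active, redirects the return address on the
 * caller's stack via prepare_ftrace_return().
 */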
void ftrace_function_trampoline(unsigned long parent,
				unsigned long self_addr,
				unsigned long org_sp_gr3)
{
	extern ftrace_func_t ftrace_trace_function;

	if (ftrace_trace_function != ftrace_stub) {
		ftrace_trace_function(parent, self_addr);
		return;
	}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	if (ftrace_graph_entry && ftrace_graph_return) {
		unsigned long sp;
		unsigned long *parent_rp;

		asm volatile ("copy %%r30, %0" : "=r"(sp));

		/* sanity check: is the stack pointer we got from the
		   assembler code in entry.S within a reasonable range
		   of the current stack pointer? */
		if ((sp - org_sp_gr3) > 0x400)
			return;

		/* calculate pointer to %rp in stack */
		parent_rp = (unsigned long *) org_sp_gr3 - 0x10;

		/* sanity check: parent_rp should hold parent */
		if (*parent_rp != parent)
			return;

		prepare_ftrace_return(parent_rp, self_addr);
		return;
	}
#endif
}