/*
 * Code for replacing ftrace calls with jumps.
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 *
 * Thanks goes out to P.A. Semi, Inc for supplying me with a PPC64 box.
 *
 * Added function graph tracer code, taken from x86 that was written
 * by Frederic Weisbecker, and ported to PPC by Steven Rostedt.
 *
 */

#define pr_fmt(fmt) "ftrace-powerpc: " fmt

#include <linux/spinlock.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/percpu.h>
#include <linux/init.h>
#include <linux/list.h>

#include <asm/cacheflush.h>
#include <asm/code-patching.h>
#include <asm/ftrace.h>
#include <asm/syscall.h>
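
/*
 * With CONFIG_DYNAMIC_FTRACE every traced function starts with a call
 * to _mcount.  At boot those call sites are patched into nops; enabling
 * tracing patches them back into branches to ftrace_caller, going via a
 * trampoline when the target is out of 24-bit branch range (e.g. from
 * modules).
 */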

#ifdef CONFIG_DYNAMIC_FTRACE
static unsigned int
ftrace_call_replace(unsigned long ip, unsigned long addr, int link)
{
        unsigned int op;

        addr = ppc_function_entry((void *)addr);

        /* if (link) set op to 'bl' else 'b' */
        op = create_branch((unsigned int *)ip, addr, link ? 1 : 0);

        return op;
}

static int
ftrace_modify_code(unsigned long ip, unsigned int old, unsigned int new)
{
        unsigned int replaced;

        /*
         * Note: Due to modules and __init, code can disappear and
         * change; we need to protect against faulting as well as code
         * changing.  We do this by using the probe_kernel_* functions.
         *
         * No real locking needed, this code is run through
         * kstop_machine, or before SMP starts.
         */

        /* read the text we want to modify */
        if (probe_kernel_read(&replaced, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure it is what we expect it to be */
        if (replaced != old)
                return -EINVAL;

        /* replace the text with the new text */
        if (patch_instruction((unsigned int *)ip, new))
                return -EPERM;

        return 0;
}

/*
 * Helper functions that are the same for both PPC64 and PPC32.
 */
static int test_24bit_addr(unsigned long ip, unsigned long addr)
{
        addr = ppc_function_entry((void *)addr);

        /* use the create_branch to verify that this offset can be branched */
        return create_branch((unsigned int *)ip, addr, 0);
}

#ifdef CONFIG_MODULES
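
/*
 * A PowerPC I-form branch encodes "opcode(6) | LI(24) | AA | LK".
 * Masking with 0xfc000003 keeps the opcode, AA and LK bits; the value
 * 0x48000001 is a relative branch-and-link ("bl"): opcode 18, AA = 0,
 * LK = 1.
 */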
static int is_bl_op(unsigned int op)
{
        return (op & 0xfc000003) == 0x48000001;
}
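
/*
 * Extract the 26-bit signed byte offset (LI << 2) from a branch
 * instruction and add it to the address of the branch itself to
 * recover the absolute target.
 */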
static unsigned long find_bl_target(unsigned long ip, unsigned int op)
{
        int offset;

        offset = (op & 0x03fffffc);

        /* make it signed */
        if (offset & 0x02000000)
                offset |= 0xfe000000;

        return ip + (long)offset;
}

#ifdef CONFIG_PPC64
static int
__ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op;
        unsigned long entry, ptr;
        unsigned long ip = rec->ip;
        void *tramp;

        /* read where this goes */
        if (probe_kernel_read(&op, (void *)ip, sizeof(int)))
                return -EFAULT;

        /* Make sure that this is still a 24bit jump */
        if (!is_bl_op(op)) {
                pr_err("Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }

        /* let's find where the pointer goes */
        tramp = (void *)find_bl_target(ip, op);

        pr_devel("ip:%lx jumps to %p", ip, tramp);

        if (!is_module_trampoline(tramp)) {
                pr_err("Not a trampoline\n");
                return -EINVAL;
        }

        if (module_trampoline_target(mod, tramp, &ptr)) {
                pr_err("Failed to get trampoline target\n");
                return -EFAULT;
        }

        pr_devel("trampoline target %lx", ptr);

        entry = ppc_global_function_entry((void *)addr);
        /* This should match what was called */
        if (ptr != entry) {
                pr_err("addr %lx does not match expected %lx\n", ptr, entry);
                return -EINVAL;
        }

        /*
         * Our original call site looks like:
         *
         * bl <tramp>
         * ld r2,XX(r1)
         *
         * Milton Miller pointed out that we can not simply nop the branch.
         * If a task was preempted when calling a trace function, the nops
         * will remove the way to restore the TOC in r2 and the r2 TOC will
         * get corrupted.
         *
         * Use a b +8 to jump over the load.
         */
        op = 0x48000008;        /* b +8 */

        if (patch_instruction((unsigned int *)ip, op))
                return -EPERM;

        return 0;
}

#else /* !PPC64 */
static int
__ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op;
        unsigned int jmp[4];
        unsigned long ip = rec->ip;
        unsigned long tramp;

        if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* Make sure that this is still a 24bit jump */
        if (!is_bl_op(op)) {
                pr_err("Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }

        /* let's find where the pointer goes */
        tramp = find_bl_target(ip, op);

        /*
         * On PPC32 the trampoline looks like:
         *  0x3d, 0x80, 0x00, 0x00  lis r12,sym@ha
         *  0x39, 0x8c, 0x00, 0x00  addi r12,r12,sym@l
         *  0x7d, 0x89, 0x03, 0xa6  mtctr r12
         *  0x4e, 0x80, 0x04, 0x20  bctr
         */

        pr_devel("ip:%lx jumps to %lx", ip, tramp);

        /* Find where the trampoline jumps to */
        if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
                pr_err("Failed to read %lx\n", tramp);
                return -EFAULT;
        }

        pr_devel(" %08x %08x ", jmp[0], jmp[1]);

        /* verify that this is what we expect it to be */
        if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
            ((jmp[1] & 0xffff0000) != 0x398c0000) ||
            (jmp[2] != 0x7d8903a6) ||
            (jmp[3] != 0x4e800420)) {
                pr_err("Not a trampoline\n");
                return -EINVAL;
        }
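
        /*
         * Recombine the target address from the lis/addi pair.  The
         * high half was generated with @ha, which pre-adds 0x10000
         * whenever the (sign-extended) low half is negative, so undo
         * that adjustment here.
         */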
        tramp = (jmp[1] & 0xffff) |
                ((jmp[0] & 0xffff) << 16);
        if (tramp & 0x8000)
                tramp -= 0x10000;

        pr_devel(" %lx ", tramp);

        if (tramp != addr) {
                pr_err("Trampoline location %08lx does not match addr\n",
                       tramp);
                return -EINVAL;
        }

        op = PPC_INST_NOP;

        if (patch_instruction((unsigned int *)ip, op))
                return -EPERM;

        return 0;
}
#endif /* PPC64 */
#endif /* CONFIG_MODULES */
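
/*
 * Entry point from the ftrace core: turn a traced call site back into
 * a nop (or, on PPC64, into a "b +8" that also skips the TOC restore).
 */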
int ftrace_make_nop(struct module *mod,
                    struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned int old, new;

        /*
         * If the calling address is more than 24 bits away,
         * then we had to use a trampoline to make the call.
         * Otherwise just update the call site.
         */
        if (test_24bit_addr(ip, addr)) {
                /* within range */
                old = ftrace_call_replace(ip, addr, 1);
                new = PPC_INST_NOP;
                return ftrace_modify_code(ip, old, new);
        }

#ifdef CONFIG_MODULES
        /*
         * Out of range jumps are called from modules.
         * We should either already have a pointer to the module
         * or it has been passed in.
         */
        if (!rec->arch.mod) {
                if (!mod) {
                        pr_err("No module loaded addr=%lx\n", addr);
                        return -EFAULT;
                }
                rec->arch.mod = mod;
        } else if (mod) {
                if (mod != rec->arch.mod) {
                        pr_err("Record mod %p not equal to passed in mod %p\n",
                               rec->arch.mod, mod);
                        return -EINVAL;
                }
                /* nothing to do if mod == rec->arch.mod */
        } else
                mod = rec->arch.mod;

        return __ftrace_make_nop(mod, rec, addr);
#else
        /* We should not get here without modules */
        return -EINVAL;
#endif /* CONFIG_MODULES */
}

#ifdef CONFIG_MODULES
#ifdef CONFIG_PPC64
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op[2];
        void *ip = (void *)rec->ip;

        /* read where this goes */
        if (probe_kernel_read(op, ip, sizeof(op)))
                return -EFAULT;

        /*
         * We expect to see:
         *
         * b +8
         * ld r2,XX(r1)
         *
         * The load offset is different depending on the ABI. For simplicity
         * just mask it out when doing the compare.
         */
        if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
                pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
                return -EINVAL;
        }

        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
                pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }

        /* Ensure branch is within 24 bits */
        if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
                pr_err("Branch out of range\n");
                return -EINVAL;
        }

        if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
                pr_err("REL24 out of range!\n");
                return -EINVAL;
        }

        return 0;
}
#else
static int
__ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned int op;
        unsigned long ip = rec->ip;

        /* read where this goes */
        if (probe_kernel_read(&op, (void *)ip, MCOUNT_INSN_SIZE))
                return -EFAULT;

        /* It should be pointing to a nop */
        if (op != PPC_INST_NOP) {
                pr_err("Expected NOP but have %x\n", op);
                return -EINVAL;
        }

        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
                pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }

        /* create the branch to the trampoline */
        op = create_branch((unsigned int *)ip,
                           rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
        if (!op) {
                pr_err("REL24 out of range!\n");
                return -EINVAL;
        }

        pr_devel("write to %lx\n", rec->ip);

        if (patch_instruction((unsigned int *)ip, op))
                return -EPERM;

        return 0;
}
#endif /* CONFIG_PPC64 */
#endif /* CONFIG_MODULES */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
        unsigned long ip = rec->ip;
        unsigned int old, new;

        /*
         * If the calling address is more than 24 bits away,
         * then we had to use a trampoline to make the call.
         * Otherwise just update the call site.
         */
        if (test_24bit_addr(ip, addr)) {
                /* within range */
                old = PPC_INST_NOP;
                new = ftrace_call_replace(ip, addr, 1);
                return ftrace_modify_code(ip, old, new);
        }

#ifdef CONFIG_MODULES
        /*
         * Out of range jumps are called from modules.
         * Being that we are converting from nop, it had better
         * already have a module defined.
         */
        if (!rec->arch.mod) {
                pr_err("No module loaded\n");
                return -EINVAL;
        }

        return __ftrace_make_call(rec, addr);
#else
        /* We should not get here without modules */
        return -EINVAL;
#endif /* CONFIG_MODULES */
}
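
/*
 * Redirect the call at the ftrace_call site (inside ftrace_caller) so
 * that it invokes the tracer function that is currently registered.
 */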
int ftrace_update_ftrace_func(ftrace_func_t func)
{
        unsigned long ip = (unsigned long)(&ftrace_call);
        unsigned int old, new;
        int ret;

        old = *(unsigned int *)&ftrace_call;
        new = ftrace_call_replace(ip, (unsigned long)func, 1);
        ret = ftrace_modify_code(ip, old, new);

        return ret;
}
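
/*
 * Let the ftrace core decide what each record needs (via
 * ftrace_update_record()) and apply the matching patch.
 */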
static int __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
{
        unsigned long ftrace_addr = (unsigned long)FTRACE_ADDR;
        int ret;

        ret = ftrace_update_record(rec, enable);

        switch (ret) {
        case FTRACE_UPDATE_IGNORE:
                return 0;
        case FTRACE_UPDATE_MAKE_CALL:
                return ftrace_make_call(rec, ftrace_addr);
        case FTRACE_UPDATE_MAKE_NOP:
                return ftrace_make_nop(NULL, rec, ftrace_addr);
        }

        return 0;
}

void ftrace_replace_code(int enable)
{
        struct ftrace_rec_iter *iter;
        struct dyn_ftrace *rec;
        int ret;

        for (iter = ftrace_rec_iter_start(); iter;
             iter = ftrace_rec_iter_next(iter)) {
                rec = ftrace_rec_iter_record(iter);
                ret = __ftrace_replace_code(rec, enable);
                if (ret) {
                        ftrace_bug(ret, rec);
                        return;
                }
        }
}
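
/*
 * Entry point from the ftrace core: dispatch on the command bits to
 * patch the call sites, the tracer function and the function-graph
 * caller.
 */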
void arch_ftrace_update_code(int command)
{
        if (command & FTRACE_UPDATE_CALLS)
                ftrace_replace_code(1);
        else if (command & FTRACE_DISABLE_CALLS)
                ftrace_replace_code(0);

        if (command & FTRACE_UPDATE_TRACE_FUNC)
                ftrace_update_ftrace_func(ftrace_trace_function);

        if (command & FTRACE_START_FUNC_RET)
                ftrace_enable_ftrace_graph_caller();
        else if (command & FTRACE_STOP_FUNC_RET)
                ftrace_disable_ftrace_graph_caller();
}

int __init ftrace_dyn_arch_init(void)
{
        return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
extern void ftrace_graph_call(void);
extern void ftrace_graph_stub(void);
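
/*
 * ftrace_graph_call is a patchable branch inside the ftrace_caller
 * assembly: toggle it between the real graph caller and a stub that
 * simply falls through.
 */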
int ftrace_enable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        unsigned long addr = (unsigned long)(&ftrace_graph_caller);
        unsigned long stub = (unsigned long)(&ftrace_graph_stub);
        unsigned int old, new;

        old = ftrace_call_replace(ip, stub, 0);
        new = ftrace_call_replace(ip, addr, 0);

        return ftrace_modify_code(ip, old, new);
}

int ftrace_disable_ftrace_graph_caller(void)
{
        unsigned long ip = (unsigned long)(&ftrace_graph_call);
        unsigned long addr = (unsigned long)(&ftrace_graph_caller);
        unsigned long stub = (unsigned long)(&ftrace_graph_stub);
        unsigned int old, new;

        old = ftrace_call_replace(ip, addr, 0);
        new = ftrace_call_replace(ip, stub, 0);

        return ftrace_modify_code(ip, old, new);
}
#endif /* CONFIG_DYNAMIC_FTRACE */

/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info. Return the address we want to divert to.
 */
unsigned long prepare_ftrace_return(unsigned long parent, unsigned long ip)
{
        struct ftrace_graph_ent trace;
        unsigned long return_hooker;

        if (unlikely(ftrace_graph_is_dead()))
                goto out;

        if (unlikely(atomic_read(&current->tracing_graph_pause)))
                goto out;

        return_hooker = ppc_function_entry(return_to_handler);

        trace.func = ip;
        trace.depth = current->curr_ret_stack + 1;

        /* Only trace if the calling function expects to */
        if (!ftrace_graph_entry(&trace))
                goto out;

        if (ftrace_push_return_trace(parent, ip, &trace.depth, 0) == -EBUSY)
                goto out;

        parent = return_hooker;
out:
        return parent;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64)
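/*
 * PPC64 of this vintage lays out sys_call_table with two entries per
 * syscall (the native handler followed by its 32-bit compat handler),
 * hence the nr * 2 index below.
 */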
unsigned long __init arch_syscall_addr(int nr)
{
        return sys_call_table[nr*2];
}
#endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */