/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/kprobes.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/current.h>
#include <asm/disasm.h>

#define MIN_STACK_SIZE(addr)	min((unsigned long)MAX_STACK_SIZE, \
		(unsigned long)current_thread_info() + THREAD_SIZE - (addr))

DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);

int __kprobes arch_prepare_kprobe(struct kprobe *p)
{
	/* Attempt to probe at unaligned address */
	if ((unsigned long)p->addr & 0x01)
		return -EINVAL;

	/* Address should not be in exception handling code */

	p->ainsn.is_short = is_short_instr((unsigned long)p->addr);
	p->opcode = *p->addr;

	return 0;
}
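
/*
 * Arming a probe overwrites the first 16-bit word at the probe point with
 * UNIMP_S, whose fetch raises an instruction-error exception (the DIE_IERR
 * path below); disarming writes the saved opcode back. Both must flush the
 * icache range so the core does not execute a stale copy of the old
 * encoding.
 */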
void __kprobes arch_arm_kprobe(struct kprobe *p)
{
	*p->addr = UNIMP_S_INSTRUCTION;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_disarm_kprobe(struct kprobe *p)
{
	*p->addr = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));
}

void __kprobes arch_remove_kprobe(struct kprobe *p)
{
	arch_disarm_kprobe(p);

	/* Can we remove the kprobe in the middle of kprobe handling? */
	if (p->ainsn.t1_addr) {
		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

		flush_icache_range((unsigned long)p->ainsn.t1_addr,
				   (unsigned long)p->ainsn.t1_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t1_addr = NULL;
	}

	if (p->ainsn.t2_addr) {
		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t2_addr = NULL;
	}
}
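
/*
 * Re-entrancy bookkeeping: if a second kprobe fires while one is already
 * being handled (e.g. from within a user pre_handler), arc_kprobe_handler()
 * stashes the active kprobe and its status here and restores them once the
 * nested probe has been single stepped.
 */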
static void __kprobes save_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	kcb->prev_kprobe.kp = kprobe_running();
	kcb->prev_kprobe.status = kcb->kprobe_status;
}

static void __kprobes restore_previous_kprobe(struct kprobe_ctlblk *kcb)
{
	__this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
	kcb->kprobe_status = kcb->prev_kprobe.status;
}

static inline void __kprobes set_current_kprobe(struct kprobe *p)
{
	__this_cpu_write(current_kprobe, p);
}

static void __kprobes resume_execution(struct kprobe *p, unsigned long addr,
				       struct pt_regs *regs)
{
	/* Remove the trap instructions inserted for single stepping and
	 * restore the original instructions
	 */
	if (p->ainsn.t1_addr) {
		*(p->ainsn.t1_addr) = p->ainsn.t1_opcode;

		flush_icache_range((unsigned long)p->ainsn.t1_addr,
				   (unsigned long)p->ainsn.t1_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t1_addr = NULL;
	}

	if (p->ainsn.t2_addr) {
		*(p->ainsn.t2_addr) = p->ainsn.t2_opcode;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));

		p->ainsn.t2_addr = NULL;
	}
}
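
/*
 * Single stepping is emulated in software here: setup_singlestep() restores
 * the original instruction at the probe point and plants TRAP_S 2 at the
 * next PC (t1); if the probed instruction may branch, a second trap is
 * planted at the branch target as well (t2). resume_execution() above
 * undoes both traps.
 */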
static void __kprobes setup_singlestep(struct kprobe *p, struct pt_regs *regs)
{
	unsigned long next_pc;
	unsigned long tgt_if_br = 0;
	int is_branch;
	unsigned long bta;

	/* Copy the opcode back to the kprobe location and execute the
	 * instruction. Because of this we will not be able to get into the
	 * same kprobe until this kprobe is done
	 */
	*(p->addr) = p->opcode;

	flush_icache_range((unsigned long)p->addr,
			   (unsigned long)p->addr + sizeof(kprobe_opcode_t));

	/* Now we insert the trap at the next location after this instruction
	 * to single step. If it is a branch we insert the trap at possible
	 * branch targets
	 */
	bta = regs->bta;

	if (regs->status32 & 0x40) {
		/* We are in a delay slot with the branch taken */

		next_pc = bta & ~0x01;

		if (!p->ainsn.is_short) {
			if (bta & 0x01)
				regs->blink += 2;
			else {
				/* Branch not taken */
				next_pc += 2;

				/* next pc is taken from bta after executing the
				 * delay slot instruction
				 */
				regs->bta += 2;
			}
		}

		is_branch = 0;
	} else
		is_branch =
		    disasm_next_pc((unsigned long)p->addr, regs,
			(struct callee_regs *) current->thread.callee_reg,
			&next_pc, &tgt_if_br);

	p->ainsn.t1_addr = (kprobe_opcode_t *) next_pc;
	p->ainsn.t1_opcode = *(p->ainsn.t1_addr);
	*(p->ainsn.t1_addr) = TRAP_S_2_INSTRUCTION;

	flush_icache_range((unsigned long)p->ainsn.t1_addr,
			   (unsigned long)p->ainsn.t1_addr +
			   sizeof(kprobe_opcode_t));

	if (is_branch) {
		p->ainsn.t2_addr = (kprobe_opcode_t *) tgt_if_br;
		p->ainsn.t2_opcode = *(p->ainsn.t2_addr);
		*(p->ainsn.t2_addr) = TRAP_S_2_INSTRUCTION;

		flush_icache_range((unsigned long)p->ainsn.t2_addr,
				   (unsigned long)p->ainsn.t2_addr +
				   sizeof(kprobe_opcode_t));
	}
}

int __kprobes arc_kprobe_handler(unsigned long addr, struct pt_regs *regs)
{
	struct kprobe *p;
	struct kprobe_ctlblk *kcb;

	preempt_disable();

	kcb = get_kprobe_ctlblk();
	p = get_kprobe((unsigned long *)addr);

	if (p) {
		/*
		 * We have re-entered the kprobe_handler: another kprobe was
		 * hit while we were inside a handler. Save the original
		 * kprobe and single step the instruction of the new probe
		 * without calling any user handlers, to avoid recursive
		 * kprobes.
		 */
		if (kprobe_running()) {
			save_previous_kprobe(kcb);
			set_current_kprobe(p);
			kprobes_inc_nmissed_count(p);
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_REENTER;
			return 1;
		}

		set_current_kprobe(p);
		kcb->kprobe_status = KPROBE_HIT_ACTIVE;

		/* If we have no pre-handler or it returned 0, we continue with
		 * normal processing. If we have a pre-handler and it returned
		 * non-zero - meaning the user handler set up registers to exit
		 * to another instruction - we must skip the single stepping.
		 */
		if (!p->pre_handler || !p->pre_handler(p, regs)) {
			setup_singlestep(p, regs);
			kcb->kprobe_status = KPROBE_HIT_SS;
		} else {
			reset_current_kprobe();
			preempt_enable_no_resched();
		}

		return 1;
	}

	/* no_kprobe: */
	preempt_enable_no_resched();
	return 0;
}

static int __kprobes arc_post_kprobe_handler(unsigned long addr,
					     struct pt_regs *regs)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	if (!cur)
		return 0;

	resume_execution(cur, addr, regs);

	/* Rearm the kprobe */
	arch_arm_kprobe(cur);

	/*
	 * When we return from the trap instruction we go to the next
	 * instruction. We restored the actual instruction in
	 * resume_execution(), so we have to return to the same address and
	 * execute it.
	 */
	regs->ret = addr;

	if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
		kcb->kprobe_status = KPROBE_HIT_SSDONE;
		cur->post_handler(cur, regs, 0);
	}

	if (kcb->kprobe_status == KPROBE_REENTER) {
		restore_previous_kprobe(kcb);
		goto out;
	}

	reset_current_kprobe();

out:
	preempt_enable_no_resched();
	return 1;
}

/*
 * The fault can be for the instruction being single stepped or for the
 * pre/post handlers in the module.
 * This is applicable for applications like user probes, where we have the
 * probe in user space and the handlers in the kernel
 */
int __kprobes kprobe_fault_handler(struct pt_regs *regs, unsigned long trapnr)
{
	struct kprobe *cur = kprobe_running();
	struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();

	switch (kcb->kprobe_status) {
	case KPROBE_HIT_SS:
	case KPROBE_REENTER:
		/*
		 * We are here because the instruction being single stepped
		 * caused the fault. We reset the current kprobe and let the
		 * exception handler run as if this were a regular exception.
		 * In our case it doesn't matter because the system will be
		 * halted.
		 */
		resume_execution(cur, (unsigned long)cur->addr, regs);

		if (kcb->kprobe_status == KPROBE_REENTER)
			restore_previous_kprobe(kcb);
		else
			reset_current_kprobe();

		preempt_enable_no_resched();
		break;

	case KPROBE_HIT_ACTIVE:
	case KPROBE_HIT_SSDONE:
		/*
		 * We are here because an instruction in the pre/post handler
		 * caused the fault.
		 */

		/* We increment the nmissed count for accounting;
		 * we could also use the npre/npostfault counts to account for
		 * these specific fault cases.
		 */
		kprobes_inc_nmissed_count(cur);

		/*
		 * We come here because an instruction in the pre/post
		 * handler caused the page fault; this can happen if the
		 * handler tries to access user space via copy_from_user(),
		 * get_user() etc. Let the user-specified handler try to fix
		 * it first.
		 */
		if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr))
			return 1;

		/*
		 * In case the user-specified fault handler returned zero,
		 * try to fix up.
		 */
		if (fixup_exception(regs))
			return 1;

		/*
		 * fixup_exception() could not handle it,
		 * so let do_page_fault() fix it.
		 */
		break;

	default:
		break;
	}

	return 0;
}

int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
				       unsigned long val, void *data)
{
	struct die_args *args = data;
	unsigned long addr = args->err;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_IERR:
		if (arc_kprobe_handler(addr, args->regs))
			return NOTIFY_STOP;
		break;

	case DIE_TRAP:
		if (arc_post_kprobe_handler(addr, args->regs))
			return NOTIFY_STOP;
		break;

	default:
		break;
	}

	return ret;
}
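
/*
 * Kretprobe support: arch_prepare_kretprobe() swaps the probed function's
 * return address (blink) for the address of kretprobe_trampoline, so the
 * function "returns" into the trampoline and trampoline_probe_handler()
 * can invoke the user handler before restoring the real return address.
 */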
static void __used kretprobe_trampoline_holder(void)
{
	__asm__ __volatile__(".global kretprobe_trampoline\n"
			     "kretprobe_trampoline:\n"
			     "nop\n");
}

void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
				      struct pt_regs *regs)
{
	ri->ret_addr = (kprobe_opcode_t *) regs->blink;

	/* Replace the return addr with trampoline addr */
	regs->blink = (unsigned long)&kretprobe_trampoline;
}

static int __kprobes trampoline_probe_handler(struct kprobe *p,
					      struct pt_regs *regs)
{
	struct kretprobe_instance *ri = NULL;
	struct hlist_head *head, empty_rp;
	struct hlist_node *tmp;
	unsigned long flags, orig_ret_address = 0;
	unsigned long trampoline_address = (unsigned long)&kretprobe_trampoline;

	INIT_HLIST_HEAD(&empty_rp);
	kretprobe_hash_lock(current, &head, &flags);

	/*
	 * It is possible to have multiple instances associated with a given
	 * task either because multiple functions in the call path have a
	 * return probe installed on them, and/or more than one return probe
	 * was registered for a target function.
	 *
	 * We can handle this because:
	 *     - instances are always inserted at the head of the list
	 *     - when multiple return probes are registered for the same
	 *       function, the first instance's ret_addr will point to the
	 *       real return address, and all the rest will point to
	 *       kretprobe_trampoline
	 */
	hlist_for_each_entry_safe(ri, tmp, head, hlist) {
		if (ri->task != current)
			/* another task is sharing our hash bucket */
			continue;

		if (ri->rp && ri->rp->handler)
			ri->rp->handler(ri, regs);

		orig_ret_address = (unsigned long)ri->ret_addr;
		recycle_rp_inst(ri, &empty_rp);

		if (orig_ret_address != trampoline_address) {
			/*
			 * This is the real return address. Any other
			 * instances associated with this task are for
			 * other calls deeper on the call stack
			 */
			break;
		}
	}

	kretprobe_assert(ri, orig_ret_address, trampoline_address);
	regs->ret = orig_ret_address;

	kretprobe_hash_unlock(current, &flags);

	hlist_for_each_entry_safe(ri, tmp, &empty_rp, hlist) {
		hlist_del(&ri->hlist);
		kfree(ri);
	}

	/* By returning a non-zero value, we tell the kprobe handler that we
	 * don't want the post_handler to run
	 */
	return 1;
}

static struct kprobe trampoline_p = {
	.addr = (kprobe_opcode_t *) &kretprobe_trampoline,
	.pre_handler = trampoline_probe_handler
};

int __init arch_init_kprobes(void)
{
	/* Registering the trampoline code for the kretprobe */
	return register_kprobe(&trampoline_p);
}

int __kprobes arch_trampoline_kprobe(struct kprobe *p)
{
	if (p->addr == (kprobe_opcode_t *) &kretprobe_trampoline)
		return 1;

	return 0;
}
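
/*
 * Entered from the ARC trap handling code on a TRAP_S: forwards the trap to
 * the kprobes notifier chain as DIE_TRAP, which ends up in
 * arc_post_kprobe_handler() via kprobe_exceptions_notify() above.
 */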
void trap_is_kprobe(unsigned long address, struct pt_regs *regs)
{
	notify_die(DIE_TRAP, "kprobe_trap", regs, address, 0, SIGTRAP);
}
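
/*
 * A minimal usage sketch (illustrative only, kept out of the build with
 * "#if 0"): a client module that exercises the arch hooks above via the
 * generic kprobes API. The target symbol "schedule" and the sample names
 * are assumptions for the example; any kernel text symbol would do.
 */
#if 0
#include <linux/module.h>
#include <linux/kprobes.h>

static int sample_pre(struct kprobe *p, struct pt_regs *regs)
{
	/* Runs from arc_kprobe_handler(), before single stepping */
	pr_info("kprobe hit at %p, ret=0x%lx\n", p->addr, regs->ret);
	return 0;	/* 0 => continue with setup_singlestep() */
}

static struct kprobe sample_kp = {
	.symbol_name = "schedule",	/* assumed example target */
	.pre_handler = sample_pre,
};

static int __init sample_init(void)
{
	/* register_kprobe() ends up calling arch_prepare_kprobe() and
	 * arch_arm_kprobe() defined in this file */
	return register_kprobe(&sample_kp);
}

static void __exit sample_exit(void)
{
	unregister_kprobe(&sample_kp);
}

module_init(sample_init);
module_exit(sample_exit);
MODULE_LICENSE("GPL");
#endif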