/*
 *  linux/arch/arm/kernel/ptrace.c
 *
 *  By Ross Biro 1/23/92
 *  edited by Linus Torvalds
 *  ARM modifications Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/elf.h>
#include <linux/smp.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/init.h>
#include <linux/signal.h>
#include <linux/uaccess.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>
#include <linux/regset.h>

#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/traps.h>

#define REG_PC	15
#define REG_PSR	16
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

#if 0
/*
 * Breakpoint SWI instruction: SWI &9F0001
 */
#define BREAKINST_ARM	0xef9f0001
#define BREAKINST_THUMB	0xdf00		/* fill this in later */
#else
/*
 * New breakpoints - use an undefined instruction.  The ARM architecture
 * reference manual guarantees that the following instruction space
 * will produce an undefined instruction exception on all CPUs:
 *
 *  ARM:   xxxx 0111 1111 xxxx xxxx xxxx 1111 xxxx
 *  Thumb: 1101 1110 xxxx xxxx
 */
#define BREAKINST_ARM	0xe7f001f0
#define BREAKINST_THUMB	0xde01
#endif
struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(r) \
	{.name = #r, .offset = offsetof(struct pt_regs, ARM_##r)}
#define REG_OFFSET_END {.name = NULL, .offset = 0}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0),
	REG_OFFSET_NAME(r1),
	REG_OFFSET_NAME(r2),
	REG_OFFSET_NAME(r3),
	REG_OFFSET_NAME(r4),
	REG_OFFSET_NAME(r5),
	REG_OFFSET_NAME(r6),
	REG_OFFSET_NAME(r7),
	REG_OFFSET_NAME(r8),
	REG_OFFSET_NAME(r9),
	REG_OFFSET_NAME(r10),
	REG_OFFSET_NAME(fp),
	REG_OFFSET_NAME(ip),
	REG_OFFSET_NAME(sp),
	REG_OFFSET_NAME(lr),
	REG_OFFSET_NAME(pc),
	REG_OFFSET_NAME(cpsr),
	REG_OFFSET_NAME(ORIG_r0),
	REG_OFFSET_END,
};
/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in struct
 * pt_regs from its name. If the name is invalid, this returns -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

/**
 * regs_query_register_name() - query register name from its offset
 * @offset:	the offset of a register in struct pt_regs.
 *
 * regs_query_register_name() returns the name of a register from its
 * offset in struct pt_regs. If the @offset is invalid, this returns NULL.
 */
const char *regs_query_register_name(unsigned int offset)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (roff->offset == offset)
			return roff->name;
	return NULL;
}

/**
 * regs_within_kernel_stack() - check the address in the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @addr:	address which is checked.
 *
 * regs_within_kernel_stack() checks whether @addr is within the kernel stack
 * page(s). If @addr is within the kernel stack, it returns true. If not,
 * returns false.
 */
bool regs_within_kernel_stack(struct pt_regs *regs, unsigned long addr)
{
	return ((addr & ~(THREAD_SIZE - 1)) ==
		(kernel_stack_pointer(regs) & ~(THREAD_SIZE - 1)));
}

/**
 * regs_get_kernel_stack_nth() - get Nth entry of the stack
 * @regs:	pt_regs which contains kernel stack pointer.
 * @n:		stack entry number.
 *
 * regs_get_kernel_stack_nth() returns @n th entry of the kernel stack which
 * is specified by @regs. If the @n th entry is NOT in the kernel stack,
 * this returns 0.
 */
unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, unsigned int n)
{
	unsigned long *addr = (unsigned long *)kernel_stack_pointer(regs);

	addr += n;
	if (regs_within_kernel_stack(regs, (unsigned long)addr))
		return *addr;
	else
		return 0;
}
/*
 * this routine will get a word off of the process's privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long get_user_reg(struct task_struct *task, int offset)
{
	return task_pt_regs(task)->uregs[offset];
}

/*
 * this routine will put a word on the process's privileged stack.
 * the offset is how far from the base addr as stored in the THREAD.
 * this routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline int
put_user_reg(struct task_struct *task, int offset, long data)
{
	struct pt_regs newregs, *regs = task_pt_regs(task);
	int ret = -EINVAL;

	newregs = *regs;
	newregs.uregs[offset] = data;

	if (valid_user_regs(&newregs)) {
		regs->uregs[offset] = data;
		ret = 0;
	}

	return ret;
}

/*
 * Called by kernel/ptrace.c when detaching.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do. */
}
/*
 * Handle hitting a breakpoint.
 */
void ptrace_break(struct task_struct *tsk, struct pt_regs *regs)
{
	siginfo_t info;

	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code  = TRAP_BRKPT;
	info.si_addr  = (void __user *)instruction_pointer(regs);

	force_sig_info(SIGTRAP, &info, tsk);
}

static int break_trap(struct pt_regs *regs, unsigned int instr)
{
	ptrace_break(current, regs);
	return 0;
}
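
/*
 * Undefined-instruction hooks that recognise the ARM and Thumb breakpoint
 * encodings (see BREAKINST_ARM/BREAKINST_THUMB above). The ARM hook masks
 * out the condition field so any condition code matches; the CPSR T bit
 * selects between ARM and Thumb state.
 */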
static struct undef_hook arm_break_hook = {
	.instr_mask	= 0x0fffffff,
	.instr_val	= 0x07f001f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= 0,
	.fn		= break_trap,
};

static struct undef_hook thumb_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xde01,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= break_trap,
};
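
/*
 * The Thumb-2 breakpoint is a 32-bit instruction: the first halfword
 * (0xf7f0) is matched by the undef hook below, and thumb2_break_trap()
 * then checks that the second halfword is 0xa000 before reporting a
 * breakpoint trap.
 */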
static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
{
	unsigned int instr2;
	void __user *pc;

	/* Check the second half of the instruction. */
	pc = (void __user *)(instruction_pointer(regs) + 2);

	if (processor_mode(regs) == SVC_MODE) {
		instr2 = *(u16 *) pc;
	} else {
		get_user(instr2, (u16 __user *)pc);
	}

	if (instr2 == 0xa000) {
		ptrace_break(current, regs);
		return 0;
	} else {
		return 1;
	}
}

static struct undef_hook thumb2_break_hook = {
	.instr_mask	= 0xffff,
	.instr_val	= 0xf7f0,
	.cpsr_mask	= PSR_T_BIT,
	.cpsr_val	= PSR_T_BIT,
	.fn		= thumb2_break_trap,
};
static int __init ptrace_break_init(void)
{
	register_undef_hook(&arm_break_hook);
	register_undef_hook(&thumb_break_hook);
	register_undef_hook(&thumb2_break_hook);
	return 0;
}

core_initcall(ptrace_break_init);
/*
 * Read the word at offset "off" into the "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *ret)
{
	unsigned long tmp;

	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	tmp = 0;
	if (off == PT_TEXT_ADDR)
		tmp = tsk->mm->start_code;
	else if (off == PT_DATA_ADDR)
		tmp = tsk->mm->start_data;
	else if (off == PT_TEXT_END_ADDR)
		tmp = tsk->mm->end_code;
	else if (off < sizeof(struct pt_regs))
		tmp = get_user_reg(tsk, off >> 2);

	return put_user(tmp, ret);
}

/*
 * Write the word at offset "off" into "struct user".  We
 * actually access the pt_regs stored on the kernel stack.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long val)
{
	if (off & 3 || off >= sizeof(struct user))
		return -EIO;

	if (off >= sizeof(struct pt_regs))
		return 0;

	return put_user_reg(tsk, off >> 2, val);
}
#ifdef CONFIG_IWMMXT
/*
 * Get the child iWMMXt state.
 */
static int ptrace_getwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -ENODATA;
	iwmmxt_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->fpstate.iwmmxt, IWMMXT_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child iWMMXt state.
 */
static int ptrace_setwmmxregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	if (!test_ti_thread_flag(thread, TIF_USING_IWMMXT))
		return -EACCES;
	iwmmxt_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->fpstate.iwmmxt, ufp, IWMMXT_SIZE)
		? -EFAULT : 0;
}
#endif

#ifdef CONFIG_CRUNCH
/*
 * Get the child Crunch state.
 */
static int ptrace_getcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_disable(thread);  /* force it to ram */
	return copy_to_user(ufp, &thread->crunchstate, CRUNCH_SIZE)
		? -EFAULT : 0;
}

/*
 * Set the child Crunch state.
 */
static int ptrace_setcrunchregs(struct task_struct *tsk, void __user *ufp)
{
	struct thread_info *thread = task_thread_info(tsk);

	crunch_task_release(thread);  /* force a reload */
	return copy_from_user(&thread->crunchstate, ufp, CRUNCH_SIZE)
		? -EFAULT : 0;
}
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * Convert a virtual register number into an index for a thread_info
 * breakpoint array. Breakpoints are identified using positive numbers
 * whilst watchpoints are negative. The registers are laid out as pairs
 * of (address, control), each pair mapping to a unique hw_breakpoint struct.
 * Register 0 is reserved for describing resource information.
 */
static int ptrace_hbp_num_to_idx(long num)
{
	if (num < 0)
		num = (ARM_MAX_BRP << 1) - num;
	return (num - 1) >> 1;
}

/*
 * Returns the virtual register number for the address of the
 * breakpoint at index idx.
 */
static long ptrace_hbp_idx_to_num(int idx)
{
	long mid = ARM_MAX_BRP << 1;
	long num = (idx << 1) + 1;
	return num > mid ? mid - num : num;
}
/*
 * Handle hitting a HW-breakpoint.
 */
static void ptrace_hbptriggered(struct perf_event *bp, int unused,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
	long num;
	int i;
	siginfo_t info;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; ++i)
		if (current->thread.debug.hbp[i] == bp)
			break;

	num = (i == ARM_MAX_HBP_SLOTS) ? 0 : ptrace_hbp_idx_to_num(i);

	info.si_signo	= SIGTRAP;
	info.si_errno	= (int)num;
	info.si_code	= TRAP_HWBKPT;
	info.si_addr	= (void __user *)(bkpt->trigger);

	force_sig_info(SIGTRAP, &info, current);
}

/*
 * Set ptrace breakpoint pointers to zero for this task.
 * This is required in order to prevent child processes from unregistering
 * breakpoints held by their parent.
 */
void clear_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	memset(tsk->thread.debug.hbp, 0, sizeof(tsk->thread.debug.hbp));
}

/*
 * Unregister breakpoints from this task and reset the pointers in
 * the thread_struct.
 */
void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
{
	int i;
	struct thread_struct *t = &tsk->thread;

	for (i = 0; i < ARM_MAX_HBP_SLOTS; i++) {
		if (t->debug.hbp[i]) {
			unregister_hw_breakpoint(t->debug.hbp[i]);
			t->debug.hbp[i] = NULL;
		}
	}
}
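
/*
 * Pack the debug resource information reported through virtual register 0:
 * byte 0 holds the number of instruction breakpoints, byte 1 the number of
 * watchpoints, byte 2 the maximum watchpoint length and byte 3 the debug
 * architecture version.
 */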
static u32 ptrace_get_hbp_resource_info(void)
{
	u8 num_brps, num_wrps, debug_arch, wp_len;
	u32 reg = 0;

	num_brps	= hw_breakpoint_slots(TYPE_INST);
	num_wrps	= hw_breakpoint_slots(TYPE_DATA);
	debug_arch	= arch_get_debug_arch();
	wp_len		= arch_get_max_wp_len();

	reg		|= debug_arch;
	reg		<<= 8;
	reg		|= wp_len;
	reg		<<= 8;
	reg		|= num_wrps;
	reg		<<= 8;
	reg		|= num_brps;

	return reg;
}
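
/*
 * Allocate a disabled, perf-event-backed breakpoint of the requested type
 * on behalf of the traced task; the address and control fields are filled
 * in later by ptrace_sethbpregs().
 */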
static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
{
	struct perf_event_attr attr;

	ptrace_breakpoint_init(&attr);

	/* Initialise fields to sane defaults. */
	attr.bp_addr	= 0;
	attr.bp_len	= HW_BREAKPOINT_LEN_4;
	attr.bp_type	= type;
	attr.disabled	= 1;

	return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
}
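
/*
 * Read a virtual breakpoint register. Register 0 returns the packed
 * resource information; for the remaining registers, odd numbers return
 * the breakpoint address and even numbers the encoded control value.
 */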
static int ptrace_gethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	u32 reg;
	int idx, ret = 0;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl arch_ctrl;

	if (num == 0) {
		reg = ptrace_get_hbp_resource_info();
	} else {
		idx = ptrace_hbp_num_to_idx(num);
		if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
			ret = -EINVAL;
			goto out;
		}

		bp = tsk->thread.debug.hbp[idx];
		if (!bp) {
			reg = 0;
			goto put;
		}

		arch_ctrl = counter_arch_bp(bp)->ctrl;

		/*
		 * Fix up the len because we may have adjusted it
		 * to compensate for an unaligned address.
		 */
		while (!(arch_ctrl.len & 0x1))
			arch_ctrl.len >>= 1;

		if (num & 0x1)
			reg = bp->attr.bp_addr;
		else
			reg = encode_ctrl_reg(arch_ctrl);
	}

put:
	if (put_user(reg, data))
		ret = -EFAULT;

out:
	return ret;
}
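
/*
 * Write a virtual breakpoint register. The underlying perf event is
 * created on first use; odd register numbers set the address, even
 * numbers decode and apply the control value.
 */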
static int ptrace_sethbpregs(struct task_struct *tsk, long num,
			     unsigned long __user *data)
{
	int idx, gen_len, gen_type, implied_type, ret = 0;
	u32 user_val;
	struct perf_event *bp;
	struct arch_hw_breakpoint_ctrl ctrl;
	struct perf_event_attr attr;

	if (num == 0)
		goto out;
	else if (num < 0)
		implied_type = HW_BREAKPOINT_RW;
	else
		implied_type = HW_BREAKPOINT_X;

	idx = ptrace_hbp_num_to_idx(num);
	if (idx < 0 || idx >= ARM_MAX_HBP_SLOTS) {
		ret = -EINVAL;
		goto out;
	}

	if (get_user(user_val, data)) {
		ret = -EFAULT;
		goto out;
	}

	bp = tsk->thread.debug.hbp[idx];
	if (!bp) {
		bp = ptrace_hbp_create(tsk, implied_type);
		if (IS_ERR(bp)) {
			ret = PTR_ERR(bp);
			goto out;
		}
		tsk->thread.debug.hbp[idx] = bp;
	}

	attr = bp->attr;

	if (num & 0x1) {
		/* Address */
		attr.bp_addr = user_val;
	} else {
		/* Control */
		decode_ctrl_reg(user_val, &ctrl);
		ret = arch_bp_generic_fields(ctrl, &gen_len, &gen_type);
		if (ret)
			goto out;

		if ((gen_type & implied_type) != gen_type) {
			ret = -EINVAL;
			goto out;
		}

		attr.bp_len	= gen_len;
		attr.bp_type	= gen_type;
		attr.disabled	= !ctrl.enabled;
	}

	ret = modify_user_hw_breakpoint(bp, &attr);

out:
	return ret;
}
#endif
/* regset get/set implementations */

static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   regs,
				   0, sizeof(*regs));
}

static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct pt_regs newregs;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &newregs,
				 0, sizeof(newregs));
	if (ret)
		return ret;

	if (!valid_user_regs(&newregs))
		return -EINVAL;

	*task_pt_regs(target) = newregs;
	return 0;
}
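
/*
 * The legacy FPA emulator state lives in thread_info->fpstate; fpa_set()
 * also flags coprocessors 1 and 2 as used for this task.
 */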
static int fpa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &task_thread_info(target)->fpstate,
				   0, sizeof(struct user_fp));
}

static int fpa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	thread->used_cp[1] = thread->used_cp[2] = 1;

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &thread->fpstate,
				  0, sizeof(struct user_fp));
}
#ifdef CONFIG_VFP
/*
 * VFP register get/set implementations.
 *
 * With respect to the kernel, struct user_vfp is divided into three chunks:
 *
 * 16 or 32 real VFP registers (d0-d15 or d0-d31)
 *	These are transferred to/from the real registers in the task's
 *	vfp_hard_struct.  The number of registers depends on the kernel
 *	configuration.
 *
 * 16 or 0 fake VFP registers (d16-d31 or empty)
 *	i.e., the user_vfp structure has space for 32 registers even if
 *	the kernel doesn't have them all.
 *
 *	vfp_get() reads this chunk as zero where applicable
 *	vfp_set() ignores this chunk
 *
 * 1 word for the FPSCR
 *
 * The bounds-checking logic built into user_regset_copyout and friends
 * means that we can make a simple sequence of calls to map the relevant data
 * to/from the specified slice of the user regset structure.
 */
static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct const *vfp = &thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	vfp_sync_hwstate(thread);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &vfp->fpregs,
				  user_fpregs_offset,
				  user_fpregs_offset + sizeof(vfp->fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
				       user_fpregs_offset + sizeof(vfp->fpregs),
				       user_fpscr_offset);
	if (ret)
		return ret;

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &vfp->fpscr,
				   user_fpscr_offset,
				   user_fpscr_offset + sizeof(vfp->fpscr));
}

/*
 * For vfp_set() a read-modify-write is done on the VFP registers,
 * in order to avoid writing back a half-modified set of registers on
 * failure.
 */
static int vfp_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct thread_info *thread = task_thread_info(target);
	struct vfp_hard_struct new_vfp = thread->vfpstate.hard;
	const size_t user_fpregs_offset = offsetof(struct user_vfp, fpregs);
	const size_t user_fpscr_offset = offsetof(struct user_vfp, fpscr);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpregs,
				 user_fpregs_offset,
				 user_fpregs_offset + sizeof(new_vfp.fpregs));
	if (ret)
		return ret;

	ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					user_fpregs_offset + sizeof(new_vfp.fpregs),
					user_fpscr_offset);
	if (ret)
		return ret;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &new_vfp.fpscr,
				 user_fpscr_offset,
				 user_fpscr_offset + sizeof(new_vfp.fpscr));
	if (ret)
		return ret;

	vfp_sync_hwstate(thread);
	thread->vfpstate.hard = new_vfp;
	vfp_flush_hwstate(thread);

	return 0;
}
#endif /* CONFIG_VFP */
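
/*
 * The register sets exported for this architecture: general-purpose
 * registers, the legacy FPA state and (when configured) the VFP state.
 * They back the ptrace register requests below and ELF core-dump notes.
 */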
enum arm_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_VFP
	REGSET_VFP,
#endif
};

static const struct user_regset arm_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = ELF_NGREG,
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set
	},
	[REGSET_FPR] = {
		/*
		 * For the FPA regs in fpstate, the real fields are a mixture
		 * of sizes, so pretend that the registers are word-sized:
		 */
		.core_note_type = NT_PRFPREG,
		.n = sizeof(struct user_fp) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = fpa_get,
		.set = fpa_set
	},
#ifdef CONFIG_VFP
	[REGSET_VFP] = {
		/*
		 * Pretend that the VFP regs are word-sized, since the FPSCR is
		 * a single word dangling at the end of struct user_vfp:
		 */
		.core_note_type = NT_ARM_VFP,
		.n = ARM_VFPREGS_SIZE / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = vfp_get,
		.set = vfp_set
	},
#endif /* CONFIG_VFP */
};

static const struct user_regset_view user_arm_view = {
	.name = "arm", .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = arm_regsets, .n = ARRAY_SIZE(arm_regsets)
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_arm_view;
}
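
/*
 * Architecture-specific ptrace requests. Anything not handled here is
 * passed on to the generic ptrace_request().
 */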
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
		case PTRACE_PEEKUSR:
			ret = ptrace_read_user(child, addr, datap);
			break;

		case PTRACE_POKEUSR:
			ret = ptrace_write_user(child, addr, data);
			break;

		case PTRACE_GETREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_GPR,
						  0, sizeof(struct pt_regs),
						  datap);
			break;

		case PTRACE_SETREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_GPR,
						    0, sizeof(struct pt_regs),
						    datap);
			break;

		case PTRACE_GETFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_FPR,
						  0, sizeof(union fp_state),
						  datap);
			break;

		case PTRACE_SETFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_FPR,
						    0, sizeof(union fp_state),
						    datap);
			break;

#ifdef CONFIG_IWMMXT
		case PTRACE_GETWMMXREGS:
			ret = ptrace_getwmmxregs(child, datap);
			break;

		case PTRACE_SETWMMXREGS:
			ret = ptrace_setwmmxregs(child, datap);
			break;
#endif

		case PTRACE_GET_THREAD_AREA:
			ret = put_user(task_thread_info(child)->tp_value,
				       datap);
			break;

		case PTRACE_SET_SYSCALL:
			task_thread_info(child)->syscall = data;
			ret = 0;
			break;

#ifdef CONFIG_CRUNCH
		case PTRACE_GETCRUNCHREGS:
			ret = ptrace_getcrunchregs(child, datap);
			break;

		case PTRACE_SETCRUNCHREGS:
			ret = ptrace_setcrunchregs(child, datap);
			break;
#endif

#ifdef CONFIG_VFP
		case PTRACE_GETVFPREGS:
			ret = copy_regset_to_user(child,
						  &user_arm_view, REGSET_VFP,
						  0, ARM_VFPREGS_SIZE,
						  datap);
			break;

		case PTRACE_SETVFPREGS:
			ret = copy_regset_from_user(child,
						    &user_arm_view, REGSET_VFP,
						    0, ARM_VFPREGS_SIZE,
						    datap);
			break;
#endif

#ifdef CONFIG_HAVE_HW_BREAKPOINT
		case PTRACE_GETHBPREGS:
			if (ptrace_get_breakpoints(child) < 0)
				return -ESRCH;

			ret = ptrace_gethbpregs(child, addr,
						(unsigned long __user *)data);
			ptrace_put_breakpoints(child);
			break;

		case PTRACE_SETHBPREGS:
			if (ptrace_get_breakpoints(child) < 0)
				return -ESRCH;

			ret = ptrace_sethbpregs(child, addr,
						(unsigned long __user *)data);
			ptrace_put_breakpoints(child);
			break;
#endif

		default:
			ret = ptrace_request(child, request, addr, data);
			break;
	}

	return ret;
}
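
/*
 * Called on syscall entry and exit ("why" is 0 for entry, 1 for exit).
 * If the task is being syscall-traced, report the stop to the tracer and
 * return the (possibly updated) syscall number.
 */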
asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
{
	unsigned long ip;

	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return scno;
	if (!(current->ptrace & PT_PTRACED))
		return scno;

	/*
	 * Save IP.  IP is used to denote syscall entry/exit:
	 *  IP = 0 -> entry, = 1 -> exit
	 */
	ip = regs->ARM_ip;
	regs->ARM_ip = why;

	current_thread_info()->syscall = scno;

	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));
	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
	regs->ARM_ip = ip;

	return current_thread_info()->syscall;
}