signal.c 18 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728
  1. /*
  2. * linux/arch/arm/kernel/signal.c
  3. *
  4. * Copyright (C) 1995-2009 Russell King
  5. *
  6. * This program is free software; you can redistribute it and/or modify
  7. * it under the terms of the GNU General Public License version 2 as
  8. * published by the Free Software Foundation.
  9. */
  10. #include <linux/errno.h>
  11. #include <linux/random.h>
  12. #include <linux/signal.h>
  13. #include <linux/personality.h>
  14. #include <linux/uaccess.h>
  15. #include <linux/tracehook.h>
  16. #include <linux/uprobes.h>
  17. #include <linux/syscalls.h>
  18. #include <asm/elf.h>
  19. #include <asm/cacheflush.h>
  20. #include <asm/traps.h>
  21. #include <asm/unistd.h>
  22. #include <asm/vfp.h>
  23. #include "signal.h"
/* Sigreturn trampoline code (defined in sigreturn_codes.S): ARM and Thumb
 * variants of the sigreturn/rt_sigreturn stubs plus the FDPIC descriptor
 * loaders indexed by setup_return(). */
extern const unsigned long sigreturn_codes[17];

/* Randomised offset within the per-mm signal page at which the trampolines
 * were copied; set once in get_signal_page(). */
static unsigned long signal_return_offset;
  26. #ifdef CONFIG_CRUNCH
  27. static int preserve_crunch_context(struct crunch_sigframe __user *frame)
  28. {
  29. char kbuf[sizeof(*frame) + 8];
  30. struct crunch_sigframe *kframe;
  31. /* the crunch context must be 64 bit aligned */
  32. kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
  33. kframe->magic = CRUNCH_MAGIC;
  34. kframe->size = CRUNCH_STORAGE_SIZE;
  35. crunch_task_copy(current_thread_info(), &kframe->storage);
  36. return __copy_to_user(frame, kframe, sizeof(*frame));
  37. }
  38. static int restore_crunch_context(char __user **auxp)
  39. {
  40. struct crunch_sigframe __user *frame =
  41. (struct crunch_sigframe __user *)*auxp;
  42. char kbuf[sizeof(*frame) + 8];
  43. struct crunch_sigframe *kframe;
  44. /* the crunch context must be 64 bit aligned */
  45. kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
  46. if (__copy_from_user(kframe, frame, sizeof(*frame)))
  47. return -1;
  48. if (kframe->magic != CRUNCH_MAGIC ||
  49. kframe->size != CRUNCH_STORAGE_SIZE)
  50. return -1;
  51. *auxp += CRUNCH_STORAGE_SIZE;
  52. crunch_task_restore(current_thread_info(), &kframe->storage);
  53. return 0;
  54. }
  55. #endif
  56. #ifdef CONFIG_IWMMXT
  57. static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
  58. {
  59. char kbuf[sizeof(*frame) + 8];
  60. struct iwmmxt_sigframe *kframe;
  61. int err = 0;
  62. /* the iWMMXt context must be 64 bit aligned */
  63. kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
  64. if (test_thread_flag(TIF_USING_IWMMXT)) {
  65. kframe->magic = IWMMXT_MAGIC;
  66. kframe->size = IWMMXT_STORAGE_SIZE;
  67. iwmmxt_task_copy(current_thread_info(), &kframe->storage);
  68. } else {
  69. /*
  70. * For bug-compatibility with older kernels, some space
  71. * has to be reserved for iWMMXt even if it's not used.
  72. * Set the magic and size appropriately so that properly
  73. * written userspace can skip it reliably:
  74. */
  75. *kframe = (struct iwmmxt_sigframe) {
  76. .magic = DUMMY_MAGIC,
  77. .size = IWMMXT_STORAGE_SIZE,
  78. };
  79. }
  80. err = __copy_to_user(frame, kframe, sizeof(*kframe));
  81. return err;
  82. }
/*
 * Restore iWMMXt state from the aux area of a signal frame.
 *
 * Returns 0 on success (or when the block is legitimately absent for a
 * non-iWMMXt thread) and -1 when the frame is malformed.  *auxp is
 * advanced past the consumed block only when one was actually present.
 */
static int restore_iwmmxt_context(char __user **auxp)
{
	struct iwmmxt_sigframe __user *frame =
		(struct iwmmxt_sigframe __user *)*auxp;
	char kbuf[sizeof(*frame) + 8];
	struct iwmmxt_sigframe *kframe;

	/* the iWMMXt context must be 64 bit aligned */
	kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
	if (__copy_from_user(kframe, frame, sizeof(*frame)))
		return -1;

	/*
	 * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
	 * block is discarded for compatibility with setup_sigframe() if
	 * present, but we don't mandate its presence.  If some other
	 * magic is here, it's not for us:
	 */
	if (!test_thread_flag(TIF_USING_IWMMXT) &&
	    kframe->magic != DUMMY_MAGIC)
		return 0;

	/* Both the real and the dummy block carry IWMMXT_STORAGE_SIZE. */
	if (kframe->size != IWMMXT_STORAGE_SIZE)
		return -1;

	if (test_thread_flag(TIF_USING_IWMMXT)) {
		/* An iWMMXt thread must have a real context block. */
		if (kframe->magic != IWMMXT_MAGIC)
			return -1;
		iwmmxt_task_restore(current_thread_info(), &kframe->storage);
	}

	/* Skip the block we consumed (real context or dummy). */
	*auxp += IWMMXT_STORAGE_SIZE;
	return 0;
}
  112. #endif
  113. #ifdef CONFIG_VFP
  114. static int preserve_vfp_context(struct vfp_sigframe __user *frame)
  115. {
  116. struct vfp_sigframe kframe;
  117. int err = 0;
  118. memset(&kframe, 0, sizeof(kframe));
  119. kframe.magic = VFP_MAGIC;
  120. kframe.size = VFP_STORAGE_SIZE;
  121. err = vfp_preserve_user_clear_hwstate(&kframe.ufp, &kframe.ufp_exc);
  122. if (err)
  123. return err;
  124. return __copy_to_user(frame, &kframe, sizeof(kframe));
  125. }
  126. static int restore_vfp_context(char __user **auxp)
  127. {
  128. struct vfp_sigframe frame;
  129. int err;
  130. err = __copy_from_user(&frame, *auxp, sizeof(frame));
  131. if (err)
  132. return err;
  133. if (frame.magic != VFP_MAGIC || frame.size != VFP_STORAGE_SIZE)
  134. return -EINVAL;
  135. *auxp += sizeof(frame);
  136. return vfp_restore_user_hwstate(&frame.ufp, &frame.ufp_exc);
  137. }
  138. #endif
/*
 * Do a signal return; undo the signal stack. These are aligned to 64-bit.
 */

/*
 * Restore the blocked-signal mask and the saved integer registers from a
 * signal frame, then replay any coprocessor contexts (Crunch, iWMMXt,
 * VFP) stacked sequentially in uc_regspace.  Returns non-zero if any
 * part of the frame is invalid.
 */
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
	struct sigcontext context;
	char __user *aux;
	sigset_t set;
	int err;

	/* Restore the signal mask first; only on a clean copy. */
	err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
	if (err == 0)
		set_current_blocked(&set);

	/* Pull the whole sigcontext into kernel memory in one copy,
	 * then scatter it back into pt_regs. */
	err |= __copy_from_user(&context, &sf->uc.uc_mcontext, sizeof(context));
	if (err == 0) {
		regs->ARM_r0 = context.arm_r0;
		regs->ARM_r1 = context.arm_r1;
		regs->ARM_r2 = context.arm_r2;
		regs->ARM_r3 = context.arm_r3;
		regs->ARM_r4 = context.arm_r4;
		regs->ARM_r5 = context.arm_r5;
		regs->ARM_r6 = context.arm_r6;
		regs->ARM_r7 = context.arm_r7;
		regs->ARM_r8 = context.arm_r8;
		regs->ARM_r9 = context.arm_r9;
		regs->ARM_r10 = context.arm_r10;
		regs->ARM_fp = context.arm_fp;
		regs->ARM_ip = context.arm_ip;
		regs->ARM_sp = context.arm_sp;
		regs->ARM_lr = context.arm_lr;
		regs->ARM_pc = context.arm_pc;
		regs->ARM_cpsr = context.arm_cpsr;
	}

	/* Reject register values userspace is not allowed to set
	 * (e.g. a privileged CPSR mode). */
	err |= !valid_user_regs(regs);

	/* Each restore helper advances 'aux' past the block it consumed,
	 * so the order here must match setup_sigframe(). */
	aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= restore_crunch_context(&aux);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= restore_vfp_context(&aux);
#endif

	return err;
}
/*
 * sigreturn system call: tear down the frame built by setup_frame()
 * (found at the user SP) and resume the interrupted context.  On any
 * inconsistency the task gets SIGSEGV instead.
 */
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
	struct sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, frame))
		goto badframe;

	/* Return the restored r0 so the syscall exit path leaves it intact. */
	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
/*
 * rt_sigreturn system call: like sys_sigreturn() but for frames built by
 * setup_rt_frame(), which additionally carry siginfo and an altstack
 * record that must be restored.
 */
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/*
	 * Since we stacked the signal on a 64-bit boundary,
	 * then 'sp' should be word aligned here.  If it's
	 * not, then the user is trying to mess with us.
	 */
	if (regs->ARM_sp & 7)
		goto badframe;

	frame = (struct rt_sigframe __user *)regs->ARM_sp;

	if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
		goto badframe;

	if (restore_sigframe(regs, &frame->sig))
		goto badframe;

	/* Re-install the alternate signal stack saved at delivery time. */
	if (restore_altstack(&frame->sig.uc.uc_stack))
		goto badframe;

	/* Return the restored r0 so the syscall exit path leaves it intact. */
	return regs->ARM_r0;

badframe:
	force_sig(SIGSEGV, current);
	return 0;
}
/*
 * Populate a signal frame: save the integer registers and fault details
 * into uc_mcontext, the blocked mask into uc_sigmask, and stack the
 * coprocessor contexts (Crunch, iWMMXt, VFP) into uc_regspace in the
 * same order restore_sigframe() consumes them.  Returns non-zero on any
 * failed user copy.
 */
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
	struct aux_sigframe __user *aux;
	struct sigcontext context;
	int err = 0;

	/* Build the whole sigcontext in kernel memory first so it can be
	 * published with a single __copy_to_user(). */
	context = (struct sigcontext) {
		.arm_r0        = regs->ARM_r0,
		.arm_r1        = regs->ARM_r1,
		.arm_r2        = regs->ARM_r2,
		.arm_r3        = regs->ARM_r3,
		.arm_r4        = regs->ARM_r4,
		.arm_r5        = regs->ARM_r5,
		.arm_r6        = regs->ARM_r6,
		.arm_r7        = regs->ARM_r7,
		.arm_r8        = regs->ARM_r8,
		.arm_r9        = regs->ARM_r9,
		.arm_r10       = regs->ARM_r10,
		.arm_fp        = regs->ARM_fp,
		.arm_ip        = regs->ARM_ip,
		.arm_sp        = regs->ARM_sp,
		.arm_lr        = regs->ARM_lr,
		.arm_pc        = regs->ARM_pc,
		.arm_cpsr      = regs->ARM_cpsr,

		.trap_no       = current->thread.trap_no,
		.error_code    = current->thread.error_code,
		.fault_address = current->thread.address,
		.oldmask       = set->sig[0],
	};

	err |= __copy_to_user(&sf->uc.uc_mcontext, &context, sizeof(context));

	err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

	aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
	if (err == 0)
		err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
	if (err == 0)
		err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
	if (err == 0)
		err |= preserve_vfp_context(&aux->vfp);
#endif
	/* Terminator so userspace can tell where the aux area ends. */
	err |= __put_user(0, &aux->end_magic);

	return err;
}
  280. static inline void __user *
  281. get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
  282. {
  283. unsigned long sp = sigsp(regs->ARM_sp, ksig);
  284. void __user *frame;
  285. /*
  286. * ATPCS B01 mandates 8-byte alignment
  287. */
  288. frame = (void __user *)((sp - framesize) & ~7);
  289. /*
  290. * Check that we can actually write to the signal frame.
  291. */
  292. if (!access_ok(VERIFY_WRITE, frame, framesize))
  293. frame = NULL;
  294. return frame;
  295. }
/*
 * Arrange the registers so that the interrupted context "calls" the
 * signal handler: r0/sp/lr/pc/cpsr are rewritten, and the return
 * address (lr) is pointed either at the user-supplied restorer, at the
 * per-mm signal page, or at trampoline code copied onto the stack at
 * 'rc'.  Returns non-zero if a required user write/read fails.
 *
 * NOTE: the 'goto rc_finish' jumps from the SA_RESTORER/FDPIC branch
 * into the middle of the else-branch below — the control flow here is
 * deliberately shared and order-sensitive.
 */
static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
	     unsigned long __user *rc, void __user *frame)
{
	unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
	unsigned long handler_fdpic_GOT = 0;
	unsigned long retcode;
	unsigned int idx, thumb = 0;
	/* Start from the current CPSR with condition flags and endian
	 * bit cleared. */
	unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);
	bool fdpic = IS_ENABLED(CONFIG_BINFMT_ELF_FDPIC) &&
		     (current->personality & FDPIC_FUNCPTRS);

	if (fdpic) {
		/* On FDPIC, sa_handler is a function descriptor:
		 * word 0 = entry point, word 1 = GOT pointer (-> r9). */
		unsigned long __user *fdpic_func_desc =
					(unsigned long __user *)handler;
		if (__get_user(handler, &fdpic_func_desc[0]) ||
		    __get_user(handler_fdpic_GOT, &fdpic_func_desc[1]))
			return 1;
	}

	cpsr |= PSR_ENDSTATE;

	/*
	 * Maybe we need to deliver a 32-bit signal to a 26-bit task.
	 */
	if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
		cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
	if (elf_hwcap & HWCAP_THUMB) {
		/*
		 * The LSB of the handler determines if we're going to
		 * be using THUMB or ARM mode for this signal handler.
		 */
		thumb = handler & 1;

		/*
		 * Clear the If-Then Thumb-2 execution state.  ARM spec
		 * requires this to be all 000s in ARM mode.  Snapdragon
		 * S4/Krait misbehaves on a Thumb=>ARM signal transition
		 * without this.
		 *
		 * We must do this whenever we are running on a Thumb-2
		 * capable CPU, which includes ARMv6T2.  However, we elect
		 * to always do this to simplify the code; this field is
		 * marked UNK/SBZP for older architectures.
		 */
		cpsr &= ~PSR_IT_MASK;
		if (thumb) {
			cpsr |= PSR_T_BIT;
		} else
			cpsr &= ~PSR_T_BIT;
	}
#endif

	if (ksig->ka.sa.sa_flags & SA_RESTORER) {
		retcode = (unsigned long)ksig->ka.sa.sa_restorer;
		if (fdpic) {
			/*
			 * We need code to load the function descriptor.
			 * That code follows the standard sigreturn code
			 * (6 words), and is made of 3 + 2 words for each
			 * variant. The 4th copied word is the actual FD
			 * address that the assembly code expects.
			 */
			idx = 6 + thumb * 3;
			if (ksig->ka.sa.sa_flags & SA_SIGINFO)
				idx += 5;
			if (__put_user(sigreturn_codes[idx],   rc  ) ||
			    __put_user(sigreturn_codes[idx+1], rc+1) ||
			    __put_user(sigreturn_codes[idx+2], rc+2) ||
			    __put_user(retcode,                rc+3))
				return 1;
			goto rc_finish;
		}
	} else {
		/* Index into sigreturn_codes[]: Thumb variants are at
		 * odd word pairs, rt variants 3 words further on. */
		idx = thumb << 1;
		if (ksig->ka.sa.sa_flags & SA_SIGINFO)
			idx += 3;

		/*
		 * Put the sigreturn code on the stack no matter which return
		 * mechanism we use in order to remain ABI compliant
		 */
		if (__put_user(sigreturn_codes[idx],   rc) ||
		    __put_user(sigreturn_codes[idx+1], rc+1))
			return 1;

rc_finish:
#ifdef CONFIG_MMU
		if (cpsr & MODE32_BIT) {
			struct mm_struct *mm = current->mm;

			/*
			 * 32-bit code can use the signal return page
			 * except when the MPU has protected the vectors
			 * page from PL0
			 */
			retcode = mm->context.sigpage + signal_return_offset +
				  (idx << 2) + thumb;
		} else
#endif
		{
			/*
			 * Ensure that the instruction cache sees
			 * the return code written onto the stack.
			 */
			flush_icache_range((unsigned long)rc,
					   (unsigned long)(rc + 3));

			/* LSB set => return into Thumb state. */
			retcode = ((unsigned long)rc) + thumb;
		}
	}

	regs->ARM_r0 = ksig->sig;
	regs->ARM_sp = (unsigned long)frame;
	regs->ARM_lr = retcode;
	regs->ARM_pc = handler;
	if (fdpic)
		regs->ARM_r9 = handler_fdpic_GOT;
	regs->ARM_cpsr = cpsr;

	return 0;
}
  408. static int
  409. setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
  410. {
  411. struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
  412. int err = 0;
  413. if (!frame)
  414. return 1;
  415. /*
  416. * Set uc.uc_flags to a value which sc.trap_no would never have.
  417. */
  418. err = __put_user(0x5ac3c35a, &frame->uc.uc_flags);
  419. err |= setup_sigframe(frame, regs, set);
  420. if (err == 0)
  421. err = setup_return(regs, ksig, frame->retcode, frame);
  422. return err;
  423. }
  424. static int
  425. setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
  426. {
  427. struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
  428. int err = 0;
  429. if (!frame)
  430. return 1;
  431. err |= copy_siginfo_to_user(&frame->info, &ksig->info);
  432. err |= __put_user(0, &frame->sig.uc.uc_flags);
  433. err |= __put_user(NULL, &frame->sig.uc.uc_link);
  434. err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
  435. err |= setup_sigframe(&frame->sig, regs, set);
  436. if (err == 0)
  437. err = setup_return(regs, ksig, frame->sig.retcode, frame);
  438. if (err == 0) {
  439. /*
  440. * For realtime signals we must also set the second and third
  441. * arguments for the signal handler.
  442. * -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
  443. */
  444. regs->ARM_r1 = (unsigned long)&frame->info;
  445. regs->ARM_r2 = (unsigned long)&frame->sig.uc;
  446. }
  447. return err;
  448. }
  449. /*
  450. * OK, we're invoking a handler
  451. */
  452. static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
  453. {
  454. sigset_t *oldset = sigmask_to_save();
  455. int ret;
  456. /*
  457. * Increment event counter and perform fixup for the pre-signal
  458. * frame.
  459. */
  460. rseq_signal_deliver(ksig, regs);
  461. /*
  462. * Set up the stack frame
  463. */
  464. if (ksig->ka.sa.sa_flags & SA_SIGINFO)
  465. ret = setup_rt_frame(ksig, oldset, regs);
  466. else
  467. ret = setup_frame(ksig, oldset, regs);
  468. /*
  469. * Check that the resulting registers are actually sane.
  470. */
  471. ret |= !valid_user_regs(regs);
  472. signal_setup_done(ret, ksig, 0);
  473. }
/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
	unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
	struct ksignal ksig;
	/* 0 = no restart; >0 = restart same syscall; <0 = restart via
	 * sys_restart_syscall (the -ERESTART_RESTARTBLOCK case). */
	int restart = 0;

	/*
	 * If we were from a system call, check for system call restarting...
	 */
	if (syscall) {
		continue_addr = regs->ARM_pc;
		/* The SWI instruction is 2 bytes in Thumb, 4 in ARM mode. */
		restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
		retval = regs->ARM_r0;

		/*
		 * Prepare for system call restart.  We do this here so that a
		 * debugger will see the already changed PSW.
		 */
		switch (retval) {
		case -ERESTART_RESTARTBLOCK:
			restart -= 2;
			/* fall through: nets restart == -1 */
		case -ERESTARTNOHAND:
		case -ERESTARTSYS:
		case -ERESTARTNOINTR:
			restart++;
			regs->ARM_r0 = regs->ARM_ORIG_r0;
			regs->ARM_pc = restart_addr;
			break;
		}
	}

	/*
	 * Get the signal to deliver.  When running under ptrace, at this
	 * point the debugger may change all our registers ...
	 */
	/*
	 * Depending on the signal settings we may need to revert the
	 * decision to restart the system call.  But skip this if a
	 * debugger has chosen to restart at a different PC.
	 */
	if (get_signal(&ksig)) {
		/* handler */
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			/* These restart reasons become -EINTR once a
			 * handler runs (unless SA_RESTART allows it). */
			if (retval == -ERESTARTNOHAND ||
			    retval == -ERESTART_RESTARTBLOCK
			    || (retval == -ERESTARTSYS
				&& !(ksig.ka.sa.sa_flags & SA_RESTART))) {
				regs->ARM_r0 = -EINTR;
				regs->ARM_pc = continue_addr;
			}
		}
		handle_signal(&ksig, regs);
	} else {
		/* no handler */
		restore_saved_sigmask();
		if (unlikely(restart) && regs->ARM_pc == restart_addr) {
			regs->ARM_pc = continue_addr;
			/* Tell the caller to perform the restart itself
			 * without returning to user space. */
			return restart;
		}
	}
	return 0;
}
/*
 * Main work loop run on the return-to-user path (entered from assembly
 * with IRQs disabled).  Handles rescheduling, pending signals, uprobes
 * and notify-resume work until no TIF work bits remain.  A non-zero
 * return tells the assembly caller to restart a system call in-kernel.
 */
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
	/*
	 * The assembly code enters us with IRQs off, but it hasn't
	 * informed the tracing code of that for efficiency reasons.
	 * Update the trace code with the current status.
	 */
	trace_hardirqs_off();
	do {
		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
			/* schedule() itself is safe with IRQs disabled. */
			schedule();
		} else {
			/* Signal/resume work only applies to user mode. */
			if (unlikely(!user_mode(regs)))
				return 0;
			local_irq_enable();
			if (thread_flags & _TIF_SIGPENDING) {
				int restart = do_signal(regs, syscall);
				if (unlikely(restart)) {
					/*
					 * Restart without handlers.
					 * Deal with it without leaving
					 * the kernel space.
					 */
					return restart;
				}
				/* Any further loop iterations are no longer
				 * on the original syscall boundary. */
				syscall = 0;
			} else if (thread_flags & _TIF_UPROBE) {
				uprobe_notify_resume(regs);
			} else {
				clear_thread_flag(TIF_NOTIFY_RESUME);
				tracehook_notify_resume(regs);
				rseq_handle_notify_resume(NULL, regs);
			}
		}
		/* Re-check the work flags with IRQs off so no newly set
		 * bit can be missed before returning to user space. */
		local_irq_disable();
		thread_flags = current_thread_info()->flags;
	} while (thread_flags & _TIF_WORK_MASK);
	return 0;
}
  582. struct page *get_signal_page(void)
  583. {
  584. unsigned long ptr;
  585. unsigned offset;
  586. struct page *page;
  587. void *addr;
  588. page = alloc_pages(GFP_KERNEL, 0);
  589. if (!page)
  590. return NULL;
  591. addr = page_address(page);
  592. /* Give the signal return code some randomness */
  593. offset = 0x200 + (get_random_int() & 0x7fc);
  594. signal_return_offset = offset;
  595. /*
  596. * Copy signal return handlers into the vector page, and
  597. * set sigreturn to be a pointer to these.
  598. */
  599. memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));
  600. ptr = (unsigned long)addr + offset;
  601. flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));
  602. return page;
  603. }
/* Defer to generic check */
/* Called from the assembly exit path when the addr_limit sanity check
 * trips; delegates the actual handling to the generic helper. */
asmlinkage void addr_limit_check_failed(void)
{
	addr_limit_user_check();
}
  609. #ifdef CONFIG_DEBUG_RSEQ
/* CONFIG_DEBUG_RSEQ hook: called on the syscall path so the rseq core
 * can validate the task's rseq state. */
asmlinkage void do_rseq_syscall(struct pt_regs *regs)
{
	rseq_syscall(regs);
}
  614. #endif