dsemul.c

#include <linux/err.h>
#include <linux/slab.h>

#include <asm/branch.h>
#include <asm/cacheflush.h>
#include <asm/fpu_emulator.h>
#include <asm/inst.h>
#include <asm/mipsregs.h>
#include <asm/uaccess.h>
/**
 * struct emuframe - The 'emulation' frame structure
 * @emul:	The instruction to 'emulate'.
 * @badinst:	A break instruction to cause a return to the kernel.
 *
 * This structure defines the frames placed within the delay slot emulation
 * page in response to a call to mips_dsemul(). Each thread may be allocated
 * only one frame at any given time. The kernel stores within it the
 * instruction to be 'emulated' followed by a break instruction, then
 * executes the frame in user mode. The break causes a trap to the kernel
 * which leads to do_dsemulret() being called unless the instruction in
 * @emul causes a trap itself, is a branch, or a signal is delivered to
 * the thread. In these cases the allocated frame will either be reused by
 * a subsequent delay slot 'emulation', or be freed during signal delivery or
 * upon thread exit.
 *
 * This approach is used because:
 *
 * - Actually emulating all instructions isn't feasible. We would need to
 *   be able to handle instructions from all revisions of the MIPS ISA,
 *   all ASEs & all vendor instruction set extensions. This would be a
 *   whole lot of work & continual maintenance burden as new instructions
 *   are introduced, and in the case of some vendor extensions may not
 *   even be possible. Thus we need to take the approach of actually
 *   executing the instruction.
 *
 * - We must execute the instruction within user context. If we were to
 *   execute the instruction in kernel mode then it would have access to
 *   kernel resources without very careful checks, leaving us with a
 *   high potential for security or stability issues to arise.
 *
 * - We used to place the frame on the user's stack, but this requires
 *   that the stack be executable. This is bad for security so the
 *   per-process page is now used instead.
 *
 * - The instruction in @emul may be something entirely invalid for a
 *   delay slot. The user may (intentionally or otherwise) place a branch
 *   in a delay slot, or a kernel mode instruction, or something else
 *   which generates an exception. Thus we can't rely upon the break in
 *   @badinst always being hit. For this reason we track the index of the
 *   frame allocated to each thread, allowing us to clean it up at later
 *   points such as signal delivery or thread exit.
 *
 * - The user may generate a fake struct emuframe if they wish, invoking
 *   the BRK_MEMU break instruction themselves. We must therefore not
 *   trust that BRK_MEMU means there's actually a valid frame allocated
 *   to the thread, and must not allow the user to do anything they
 *   couldn't already.
 */
struct emuframe {
        mips_instruction        emul;
        mips_instruction        badinst;
};
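
/* Number of emulation frames that fit within the single emulation page */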
static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
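
/* Base of the per-process delay slot emulation page, located at STACK_TOP */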
static inline __user struct emuframe *dsemul_page(void)
{
        return (__user struct emuframe *)STACK_TOP;
}
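
/*
 * Allocate one frame from the current process's emulation page, marking it
 * used in the per-mm allocation bitmap. If all frames are in use, sleep
 * until another thread frees one, giving up only on a fatal signal.
 * Returns the frame index, or BD_EMUFRAME_NONE on failure.
 */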
static int alloc_emuframe(void)
{
        mm_context_t *mm_ctx = &current->mm->context;
        int idx;

retry:
        spin_lock(&mm_ctx->bd_emupage_lock);

        /* Ensure we have an allocation bitmap */
        if (!mm_ctx->bd_emupage_allocmap) {
                mm_ctx->bd_emupage_allocmap =
                        kcalloc(BITS_TO_LONGS(emupage_frame_count),
                                sizeof(unsigned long),
                                GFP_ATOMIC);

                if (!mm_ctx->bd_emupage_allocmap) {
                        idx = BD_EMUFRAME_NONE;
                        goto out_unlock;
                }
        }

        /* Attempt to allocate a single bit/frame */
        idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap,
                                      emupage_frame_count, 0);
        if (idx < 0) {
                /*
                 * Failed to allocate a frame. We'll wait until one becomes
                 * available. We unlock the page so that other threads actually
                 * get the opportunity to free their frames, which means
                 * technically the result of bitmap_full may be incorrect.
                 * However the worst case is that we repeat all this and end up
                 * back here again.
                 */
                spin_unlock(&mm_ctx->bd_emupage_lock);
                if (!wait_event_killable(mm_ctx->bd_emupage_queue,
                        !bitmap_full(mm_ctx->bd_emupage_allocmap,
                                     emupage_frame_count)))
                        goto retry;

                /* Received a fatal signal - just give in */
                return BD_EMUFRAME_NONE;
        }

        /* Success! */
        pr_debug("allocate emuframe %d to %d\n", idx, current->pid);

out_unlock:
        spin_unlock(&mm_ctx->bd_emupage_lock);
        return idx;
}
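
/*
 * Return frame @idx to @mm's allocation bitmap and wake any thread sleeping
 * in alloc_emuframe() waiting for a free frame.
 */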
static void free_emuframe(int idx, struct mm_struct *mm)
{
        mm_context_t *mm_ctx = &mm->context;

        spin_lock(&mm_ctx->bd_emupage_lock);

        pr_debug("free emuframe %d from %d\n", idx, current->pid);
        bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1);

        /* If some thread is waiting for a frame, now's its chance */
        wake_up(&mm_ctx->bd_emupage_queue);

        spin_unlock(&mm_ctx->bd_emupage_lock);
}
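
/* Return true if the exception PC in @regs lies within the emulation page */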
static bool within_emuframe(struct pt_regs *regs)
{
        unsigned long base = (unsigned long)dsemul_page();

        if (regs->cp0_epc < base)
                return false;
        if (regs->cp0_epc >= (base + PAGE_SIZE))
                return false;

        return true;
}
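
/*
 * Free any frame that @tsk has allocated and clear the thread's record of
 * it. Returns true if a frame was freed. Tolerates a task whose mm has
 * already been released.
 */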
bool dsemul_thread_cleanup(struct task_struct *tsk)
{
        int fr_idx;

        /* Clear any allocated frame, retrieving its index */
        fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);

        /* If no frame was allocated, we're done */
        if (fr_idx == BD_EMUFRAME_NONE)
                return false;

        task_lock(tsk);

        /* Free the frame that this thread had allocated */
        if (tsk->mm)
                free_emuframe(fr_idx, tsk->mm);

        task_unlock(tsk);

        return true;
}
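
/*
 * If the thread is executing from its emulation frame (e.g. when a signal
 * interrupts it there), rewind the PC to the branch if the emulated
 * instruction hasn't run yet, or advance it to the continuation PC if it
 * has, then free the frame. Returns true if a frame was found & freed.
 */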
bool dsemul_thread_rollback(struct pt_regs *regs)
{
        struct emuframe __user *fr;
        int fr_idx;

        /* Do nothing if we're not executing from a frame */
        if (!within_emuframe(regs))
                return false;

        /* Find the frame being executed */
        fr_idx = atomic_read(&current->thread.bd_emu_frame);
        if (fr_idx == BD_EMUFRAME_NONE)
                return false;
        fr = &dsemul_page()[fr_idx];

        /*
         * If the PC is at the emul instruction, roll back to the branch. If
         * PC is at the badinst (break) instruction, we've already emulated the
         * instruction so progress to the continue PC. If it's anything else
         * then something is amiss & the user has branched into some other area
         * of the emupage - we'll free the allocated frame anyway.
         */
        if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul)
                regs->cp0_epc = current->thread.bd_emu_branch_pc;
        else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst)
                regs->cp0_epc = current->thread.bd_emu_cont_pc;

        atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
        free_emuframe(fr_idx, current->mm);

        return true;
}
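
/* Free the emulation page allocation bitmap when @mm is torn down */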
void dsemul_mm_cleanup(struct mm_struct *mm)
{
        mm_context_t *mm_ctx = &mm->context;

        kfree(mm_ctx->bd_emupage_allocmap);
}
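
/*
 * Set up 'emulation' of the branch delay slot instruction @ir: write it,
 * followed by a break instruction, into this thread's frame within the
 * emulation page and point the user PC at the frame. @branch_pc is the PC
 * of the branch owning the delay slot and @cont_pc is where execution
 * should resume once the frame has run. Returns 0 on success, -1 if the
 * instruction was handled without a frame (e.g. a NOP), or SIGBUS if the
 * frame could not be set up.
 */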
int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
                unsigned long branch_pc, unsigned long cont_pc)
{
        int isa16 = get_isa16_mode(regs->cp0_epc);
        mips_instruction break_math;
        struct emuframe __user *fr;
        int err, fr_idx;

        /* NOP is easy */
        if (ir == 0)
                return -1;

        /* microMIPS instructions */
        if (isa16) {
                union mips_instruction insn = { .word = ir };

                /* NOP16 aka MOVE16 $0, $0 */
                if ((ir >> 16) == MM_NOP16)
                        return -1;

                /* ADDIUPC */
                if (insn.mm_a_format.opcode == mm_addiupc_op) {
                        unsigned int rs;
                        s32 v;

                        rs = (((insn.mm_a_format.rs + 0xe) & 0xf) + 2);
                        v = regs->cp0_epc & ~3;
                        v += insn.mm_a_format.simmediate << 2;
                        regs->regs[rs] = (long)v;
                        return -1;
                }
        }

        pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc);

        /* Allocate a frame if we don't already have one */
        fr_idx = atomic_read(&current->thread.bd_emu_frame);
        if (fr_idx == BD_EMUFRAME_NONE)
                fr_idx = alloc_emuframe();
        if (fr_idx == BD_EMUFRAME_NONE)
                return SIGBUS;
        fr = &dsemul_page()[fr_idx];

        /* Retrieve the appropriately encoded break instruction */
        break_math = BREAK_MATH(isa16);

        /* Write the instructions to the frame */
        if (isa16) {
                err = __put_user(ir >> 16,
                                 (u16 __user *)(&fr->emul));
                err |= __put_user(ir & 0xffff,
                                  (u16 __user *)((long)(&fr->emul) + 2));
                err |= __put_user(break_math >> 16,
                                  (u16 __user *)(&fr->badinst));
                err |= __put_user(break_math & 0xffff,
                                  (u16 __user *)((long)(&fr->badinst) + 2));
        } else {
                err = __put_user(ir, &fr->emul);
                err |= __put_user(break_math, &fr->badinst);
        }

        if (unlikely(err)) {
                MIPS_FPU_EMU_INC_STATS(errors);
                free_emuframe(fr_idx, current->mm);
                return SIGBUS;
        }

        /* Record the PC of the branch, PC to continue from & frame index */
        current->thread.bd_emu_branch_pc = branch_pc;
        current->thread.bd_emu_cont_pc = cont_pc;
        atomic_set(&current->thread.bd_emu_frame, fr_idx);

        /* Change user register context to execute the frame */
        regs->cp0_epc = (unsigned long)&fr->emul | isa16;

        /* Ensure the icache observes our newly written frame */
        flush_cache_sigtramp((unsigned long)&fr->emul);

        return 0;
}
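
/*
 * Handle the trap taken on the break instruction at the end of a frame:
 * free the frame and resume the thread at the recorded continuation PC.
 * Returns false if no frame was allocated, e.g. if the break was forged by
 * userland.
 */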
bool do_dsemulret(struct pt_regs *xcp)
{
        /* Cleanup the allocated frame, returning if there wasn't one */
        if (!dsemul_thread_cleanup(current)) {
                MIPS_FPU_EMU_INC_STATS(errors);
                return false;
        }

        /* Set EPC to return to post-branch instruction */
        xcp->cp0_epc = current->thread.bd_emu_cont_pc;

        pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);

        MIPS_FPU_EMU_INC_STATS(ds_emul);
        return true;
}