  1. /*
  2. * Copyright (C) 2011-2012 Synopsys (www.synopsys.com)
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License version 2 as
  6. * published by the Free Software Foundation.
  7. *
  8. * vineetg : May 2011
  9. * -Adapted (from .26 to .35)
  10. * -original contribution by Tim.yao@amlogic.com
  11. *
  12. */
  13. #include <linux/types.h>
  14. #include <linux/perf_event.h>
  15. #include <linux/ptrace.h>
  16. #include <linux/uaccess.h>
  17. #include <asm/disasm.h>
#ifdef CONFIG_CPU_BIG_ENDIAN
#define BE		1
/*
 * On big-endian, byte-swap the value before the byte-wise store loops in
 * put16/put32 below, so that storing low-byte-first still lays the bytes
 * out in memory order.
 */
#define FIRST_BYTE_16	"swap %1, %1\n swape %1, %1\n"
#define FIRST_BYTE_32	"swape %1, %1\n"
#else
#define BE		0
#define FIRST_BYTE_16
#define FIRST_BYTE_32
#endif
/*
 * Load a single byte from @addr into @val; @addr is post-incremented by 1
 * (ldb.ab addressing mode).  If the access faults, the __ex_table entry
 * redirects execution to local label 3, which sets @err to 1 and resumes
 * at label 2 (just past the load).
 * NOTE: @err is only ever set, never cleared — callers initialise it to 0
 * and accumulate across multiple byte loads.
 */
#define __get8_unaligned_check(val, addr, err)		\
	__asm__(					\
	"1: ldb.ab %1, [%2, 1]\n"			\
	"2:\n"						\
	" .section .fixup,\"ax\"\n"			\
	" .align 4\n"					\
	"3: mov %0, 1\n"				\
	" j 2b\n"					\
	" .previous\n"					\
	" .section __ex_table,\"a\"\n"			\
	" .align 4\n"					\
	" .long 1b, 3b\n"				\
	" .previous\n"					\
	: "=r" (err), "=&r" (val), "=r" (addr)		\
	: "0" (err), "2" (addr))
/*
 * Read a 16-bit value byte-by-byte from a (possibly) unaligned @addr,
 * assembling it in host byte order (BE selects which shift each byte
 * gets).  On any faulting byte access, jumps to the enclosing function's
 * 'fault' label — callers must provide one.
 */
#define get16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val = v << ((BE) ? 8 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 8);		\
		if (err)				\
			goto fault;			\
	} while (0)
/*
 * Read a 32-bit value byte-by-byte from a (possibly) unaligned @addr,
 * assembling it in host byte order.  Jumps to the enclosing function's
 * 'fault' label if any of the four byte accesses faults.
 */
#define get32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v, a = addr;	\
		__get8_unaligned_check(v, a, err);	\
		val = v << ((BE) ? 24 : 0);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 16 : 8);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 8 : 16);		\
		__get8_unaligned_check(v, a, err);	\
		val |= v << ((BE) ? 0 : 24);		\
		if (err)				\
			goto fault;			\
	} while (0)
/*
 * Store a 16-bit @val byte-by-byte to a (possibly) unaligned @addr.
 * FIRST_BYTE_16 pre-swaps the value on big-endian so the byte written
 * first is the correct one for memory order.  Either faulting store is
 * redirected via the __ex_table entries to label 4, which sets err;
 * on error, jumps to the enclosing function's 'fault' label.
 */
#define put16_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_16				\
		"1: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"2: stb %1, [%2]\n"			\
		"3:\n"					\
		" .section .fixup,\"ax\"\n"		\
		" .align 4\n"				\
		"4: mov %0, 1\n"			\
		" j 3b\n"				\
		" .previous\n"				\
		" .section __ex_table,\"a\"\n"		\
		" .align 4\n"				\
		" .long 1b, 4b\n"			\
		" .long 2b, 4b\n"			\
		" .previous\n"				\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)
/*
 * Store a 32-bit @val byte-by-byte to a (possibly) unaligned @addr.
 * FIRST_BYTE_32 pre-swaps the value on big-endian.  Any of the four
 * faulting stores is redirected via the __ex_table entries to label 6,
 * which sets err; on error, jumps to the enclosing function's 'fault'
 * label.
 */
#define put32_unaligned_check(val, addr)		\
	do {						\
		unsigned int err = 0, v = val, a = addr;\
							\
		__asm__(				\
		FIRST_BYTE_32				\
		"1: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"2: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"3: stb.ab %1, [%2, 1]\n"		\
		" lsr %1, %1, 8\n"			\
		"4: stb %1, [%2]\n"			\
		"5:\n"					\
		" .section .fixup,\"ax\"\n"		\
		" .align 4\n"				\
		"6: mov %0, 1\n"			\
		" j 5b\n"				\
		" .previous\n"				\
		" .section __ex_table,\"a\"\n"		\
		" .align 4\n"				\
		" .long 1b, 6b\n"			\
		" .long 2b, 6b\n"			\
		" .long 3b, 6b\n"			\
		" .long 4b, 6b\n"			\
		" .previous\n"				\
		: "=r" (err), "=&r" (v), "=&r" (a)	\
		: "0" (err), "1" (v), "2" (a));		\
							\
		if (err)				\
			goto fault;			\
	} while (0)
/* sysctl hooks */
int unaligned_enabled __read_mostly = 1;	/* Enabled by default */

/*
 * When set (the default) only a single one-shot hint is printed; cleared
 * via /proc/sys/kernel/ignore-unaligned-usertrap to log every emulated
 * access (see misaligned_fixup() below).
 */
int no_unaligned_warning __read_mostly = 1;	/* Only 1 warning by default */
/*
 * Emulate an unaligned load.
 * Effective address is src1 + src2.  For address write-back forms
 * (aa == 1 pre-update, aa == 2 post-update — encoding per asm/disasm.h,
 * TODO confirm) the write-back register gets base + offset; for the
 * post-update form the offset is then zeroed so the load itself uses the
 * un-incremented base.  On any access fault (via the get*_unaligned_check
 * 'goto fault'), state->fault is set for the caller.
 */
static void fixup_load(struct disasm_state *state, struct pt_regs *regs,
		       struct callee_regs *cregs)
{
	int val;

	/* register write back */
	if ((state->aa == 1) || (state->aa == 2)) {
		set_reg(state->wb_reg, state->src1 + state->src2, regs, cregs);

		if (state->aa == 2)
			state->src2 = 0;
	}

	if (state->zz == 0) {
		/* zz == 0: word (32-bit) access */
		get32_unaligned_check(val, state->src1 + state->src2);
	} else {
		/* half-word (16-bit) access; x flag requests sign-extension */
		get16_unaligned_check(val, state->src1 + state->src2);

		if (state->x)
			val = (val << 16) >> 16;
	}

	/* pref == 0: a real load (not a prefetch) — commit the result */
	if (state->pref == 0)
		set_reg(state->dest, val, regs, cregs);

	return;

fault:	state->fault = 1;
}
  149. static void fixup_store(struct disasm_state *state, struct pt_regs *regs,
  150. struct callee_regs *cregs)
  151. {
  152. /* register write back */
  153. if ((state->aa == 1) || (state->aa == 2)) {
  154. set_reg(state->wb_reg, state->src2 + state->src3, regs, cregs);
  155. if (state->aa == 3)
  156. state->src3 = 0;
  157. } else if (state->aa == 3) {
  158. if (state->zz == 2) {
  159. set_reg(state->wb_reg, state->src2 + (state->src3 << 1),
  160. regs, cregs);
  161. } else if (!state->zz) {
  162. set_reg(state->wb_reg, state->src2 + (state->src3 << 2),
  163. regs, cregs);
  164. } else {
  165. goto fault;
  166. }
  167. }
  168. /* write fix-up */
  169. if (!state->zz)
  170. put32_unaligned_check(state->src1, state->src2 + state->src3);
  171. else
  172. put16_unaligned_check(state->src1, state->src2 + state->src3);
  173. return;
  174. fault: state->fault = 1;
  175. }
  176. /*
  177. * Handle an unaligned access
  178. * Returns 0 if successfully handled, 1 if some error happened
  179. */
  180. int misaligned_fixup(unsigned long address, struct pt_regs *regs,
  181. struct callee_regs *cregs)
  182. {
  183. struct disasm_state state;
  184. char buf[TASK_COMM_LEN];
  185. /* handle user mode only and only if enabled by sysadmin */
  186. if (!user_mode(regs) || !unaligned_enabled)
  187. return 1;
  188. if (no_unaligned_warning) {
  189. pr_warn_once("%s(%d) made unaligned access which was emulated"
  190. " by kernel assist\n. This can degrade application"
  191. " performance significantly\n. To enable further"
  192. " logging of such instances, please \n"
  193. " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n",
  194. get_task_comm(buf, current), task_pid_nr(current));
  195. } else {
  196. /* Add rate limiting if it gets down to it */
  197. pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n",
  198. get_task_comm(buf, current), task_pid_nr(current),
  199. address, regs->ret);
  200. }
  201. disasm_instr(regs->ret, &state, 1, regs, cregs);
  202. if (state.fault)
  203. goto fault;
  204. /* ldb/stb should not have unaligned exception */
  205. if ((state.zz == 1) || (state.di))
  206. goto fault;
  207. if (!state.write)
  208. fixup_load(&state, regs, cregs);
  209. else
  210. fixup_store(&state, regs, cregs);
  211. if (state.fault)
  212. goto fault;
  213. /* clear any remanants of delay slot */
  214. if (delay_mode(regs)) {
  215. regs->ret = regs->bta & ~1U;
  216. regs->status32 &= ~STATUS_DE_MASK;
  217. } else {
  218. regs->ret += state.instr_len;
  219. /* handle zero-overhead-loop */
  220. if ((regs->ret == regs->lp_end) && (regs->lp_count)) {
  221. regs->ret = regs->lp_start;
  222. regs->lp_count--;
  223. }
  224. }
  225. perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, address);
  226. return 0;
  227. fault:
  228. pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n",
  229. state.words[0], address);
  230. return 1;
  231. }