/*
 * arch/sh/kernel/traps_64.c
 *
 * Copyright (C) 2000, 2001 Paolo Alberelli
 * Copyright (C) 2003, 2004 Paul Mundt
 * Copyright (C) 2003, 2004 Richard Curnow
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/alignment.h>
#include <asm/processor.h>
#include <asm/pgtable.h>
#include <asm/fpu.h>

static int read_opcode(reg_size_t pc, insn_size_t *result_opcode, int from_user_mode)
{
	int get_user_error;
	unsigned long aligned_pc;
	insn_size_t opcode;

	if ((pc & 3) == 1) {
		/* SHmedia */
		aligned_pc = pc & ~3;
		if (from_user_mode) {
			if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t))) {
				get_user_error = -EFAULT;
			} else {
				get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);
				*result_opcode = opcode;
			}
			return get_user_error;
		} else {
			/* If the fault was in the kernel, we can either read
			 * this directly, or if not, we fault.
			 */
			*result_opcode = *(insn_size_t *)aligned_pc;
			return 0;
		}
	} else if ((pc & 1) == 0) {
		/* SHcompact */
		/* TODO : provide handling for this. We don't really support
		   user-mode SHcompact yet, and for a kernel fault, this would
		   have to come from a module built for SHcompact. */
		return -EFAULT;
	} else {
		/* misaligned */
		return -EFAULT;
	}
}
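
/*
 * Worked example (hypothetical PC value): on SH-5 the low bit of the PC
 * distinguishes the ISA mode, which is what read_opcode() keys off above.
 * A SHmedia PC such as 0x80001005 satisfies (pc & 3) == 1, so the 32-bit
 * opcode is fetched from the aligned address 0x80001004; a SHcompact PC
 * has bit 0 clear and currently just returns -EFAULT.
 */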

static int address_is_sign_extended(__u64 a)
{
	__u64 b;
#if (NEFF == 32)
	b = (__u64)(__s64)(__s32)(a & 0xffffffffUL);
	return (b == a) ? 1 : 0;
#else
#error "Sign extend check only works for NEFF==32"
#endif
}

/* return -1 for fault, 0 for OK */
static int generate_and_check_address(struct pt_regs *regs,
				      insn_size_t opcode,
				      int displacement_not_indexed,
				      int width_shift,
				      __u64 *address)
{
	__u64 base_address, addr;
	int basereg;

	switch (1 << width_shift) {
	case 1: inc_unaligned_byte_access(); break;
	case 2: inc_unaligned_word_access(); break;
	case 4: inc_unaligned_dword_access(); break;
	case 8: inc_unaligned_multi_access(); break;
	}

	basereg = (opcode >> 20) & 0x3f;
	base_address = regs->regs[basereg];
	if (displacement_not_indexed) {
		__s64 displacement;
		displacement = (opcode >> 10) & 0x3ff;
		displacement = sign_extend64(displacement, 9);
		addr = (__u64)((__s64)base_address + (displacement << width_shift));
	} else {
		__u64 offset;
		int offsetreg;
		offsetreg = (opcode >> 10) & 0x3f;
		offset = regs->regs[offsetreg];
		addr = base_address + offset;
	}

	/* Check sign extended */
	if (!address_is_sign_extended(addr))
		return -1;

	/* Check accessible. For misaligned access in the kernel, assume the
	   address is always accessible (and if not, just fault when the
	   load/store gets done.) */
	if (user_mode(regs)) {
		inc_unaligned_user_access();

		if (addr >= TASK_SIZE)
			return -1;
	} else
		inc_unaligned_kernel_access();

	*address = addr;

	perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, addr);
	unaligned_fixups_notify(current, opcode, regs);

	return 0;
}
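
/*
 * Worked example (hypothetical field values): for a displacement-addressed
 * LD.L (width_shift == 2) whose 10-bit displacement field is 0x3ff, the
 * code above sign-extends the field to -1 and scales it by the access
 * width, giving an effective address of base_address + (-1 << 2), i.e.
 * base_address - 4.
 */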

static void misaligned_kernel_word_load(__u64 address, int do_sign_extend, __u64 *result)
{
	unsigned short x;
	unsigned char *p, *q;

	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;
	q[0] = p[0];
	q[1] = p[1];

	if (do_sign_extend) {
		*result = (__u64)(__s64) *(short *) &x;
	} else {
		*result = (__u64) x;
	}
}

static void misaligned_kernel_word_store(__u64 address, __u64 value)
{
	unsigned short x;
	unsigned char *p, *q;

	p = (unsigned char *) (int) address;
	q = (unsigned char *) &x;

	x = (__u16) value;
	p[0] = q[0];
	p[1] = q[1];
}

static int misaligned_load(struct pt_regs *regs,
			   insn_size_t opcode,
			   int displacement_not_indexed,
			   int width_shift,
			   int do_sign_extend)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}

		switch (width_shift) {
		case 1:
			if (do_sign_extend) {
				regs->regs[destreg] = (__u64)(__s64) *(__s16 *) &buffer;
			} else {
				regs->regs[destreg] = (__u64) *(__u16 *) &buffer;
			}
			break;
		case 2:
			regs->regs[destreg] = (__u64)(__s64) *(__s32 *) &buffer;
			break;
		case 3:
			regs->regs[destreg] = buffer;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	} else {
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 lo, hi;

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_load(address, do_sign_extend, &regs->regs[destreg]);
			break;
		case 2:
			asm ("ldlo.l %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.l %1, 3, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		case 3:
			asm ("ldlo.q %1, 0, %0" : "=r" (lo) : "r" (address));
			asm ("ldhi.q %1, 7, %0" : "=r" (hi) : "r" (address));
			regs->regs[destreg] = lo | hi;
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}
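
/*
 * Note on the kernel-mode path above (described under the usual SHmedia
 * semantics of the ldlo/ldhi instruction pairs): one instruction of the
 * pair fetches the part of the misaligned datum that lies in the lower
 * aligned word (or quadword) and the other fetches the part in the upper
 * one; each zero-fills the bytes it does not supply, so OR-ing the two
 * results reassembles the full value without a byte-by-byte copy. The
 * stlo/sthi pairs in misaligned_store() below are the store-side
 * counterparts of the same technique.
 */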

static int misaligned_store(struct pt_regs *regs,
			    insn_size_t opcode,
			    int displacement_not_indexed,
			    int width_shift)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		switch (width_shift) {
		case 1:
			*(__u16 *) &buffer = (__u16) regs->regs[srcreg];
			break;
		case 2:
			*(__u32 *) &buffer = (__u32) regs->regs[srcreg];
			break;
		case 3:
			buffer = regs->regs[srcreg];
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
	} else {
		/* kernel mode - we can take short cuts since if we fault, it's a genuine bug */
		__u64 val = regs->regs[srcreg];

		switch (width_shift) {
		case 1:
			misaligned_kernel_word_store(address, val);
			break;
		case 2:
			asm ("stlo.l %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.l %1, 3, %0" : : "r" (val), "r" (address));
			break;
		case 3:
			asm ("stlo.q %1, 0, %0" : : "r" (val), "r" (address));
			asm ("sthi.q %1, 7, %0" : : "r" (val), "r" (address));
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
	}

	return 0;
}

/* Never need to fix up misaligned FPU accesses within the kernel since that's a real
   error. */
static int misaligned_fpu_load(struct pt_regs *regs,
			       insn_size_t opcode,
			       int displacement_not_indexed,
			       int width_shift,
			       int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int destreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	destreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		__u32 buflo, bufhi;

		if (!access_ok(VERIFY_READ, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		if (__copy_user(&buffer, (const void *)(int)address, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		buflo = *(__u32 *) &buffer;
		bufhi = *(1 + (__u32 *) &buffer);

		switch (width_shift) {
		case 2:
			current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
			break;
		case 3:
			if (do_paired_load) {
				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				current->thread.xstate->hardfpu.fp_regs[destreg] = bufhi;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = buflo;
#else
				current->thread.xstate->hardfpu.fp_regs[destreg] = buflo;
				current->thread.xstate->hardfpu.fp_regs[destreg+1] = bufhi;
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_load, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}
		return 0;
	} else {
		die("Misaligned FPU load inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fpu_store(struct pt_regs *regs,
				insn_size_t opcode,
				int displacement_not_indexed,
				int width_shift,
				int do_paired_load)
{
	/* Return -1 for a fault, 0 for OK */
	int error;
	int srcreg;
	__u64 address;

	error = generate_and_check_address(regs, opcode,
			displacement_not_indexed, width_shift, &address);
	if (error < 0)
		return error;

	srcreg = (opcode >> 4) & 0x3f;
	if (user_mode(regs)) {
		__u64 buffer;
		/* Initialise these to NaNs. */
		__u32 buflo = 0xffffffffUL, bufhi = 0xffffffffUL;

		if (!access_ok(VERIFY_WRITE, (unsigned long) address, 1UL << width_shift)) {
			return -1;
		}

		/* 'current' may be the current owner of the FPU state, so
		   context switch the registers into memory so they can be
		   indexed by register number. */
		if (last_task_used_math == current) {
			enable_fpu();
			save_fpu(current);
			disable_fpu();
			last_task_used_math = NULL;
			regs->sr |= SR_FD;
		}

		switch (width_shift) {
		case 2:
			buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
			break;
		case 3:
			if (do_paired_load) {
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
			} else {
#if defined(CONFIG_CPU_LITTLE_ENDIAN)
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg];
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#else
				buflo = current->thread.xstate->hardfpu.fp_regs[srcreg];
				bufhi = current->thread.xstate->hardfpu.fp_regs[srcreg+1];
#endif
			}
			break;
		default:
			printk("Unexpected width_shift %d in misaligned_fpu_store, PC=%08lx\n",
				width_shift, (unsigned long) regs->pc);
			break;
		}

		*(__u32 *) &buffer = buflo;
		*(1 + (__u32 *) &buffer) = bufhi;

		if (__copy_user((void *)(int)address, &buffer, (1 << width_shift)) > 0) {
			return -1; /* fault */
		}
		return 0;
	} else {
		die("Misaligned FPU store inside kernel", regs, 0);
		return -1;
	}
}

static int misaligned_fixup(struct pt_regs *regs)
{
	insn_size_t opcode;
	int error;
	int major, minor;
	unsigned int user_action;

	user_action = unaligned_user_action();
	if (!(user_action & UM_FIXUP))
		return -1;

	error = read_opcode(regs->pc, &opcode, user_mode(regs));
	if (error < 0) {
		return error;
	}

	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;

	switch (major) {
	case (0x84>>2): /* LD.W */
		error = misaligned_load(regs, opcode, 1, 1, 1);
		break;
	case (0xb0>>2): /* LD.UW */
		error = misaligned_load(regs, opcode, 1, 1, 0);
		break;
	case (0x88>>2): /* LD.L */
		error = misaligned_load(regs, opcode, 1, 2, 1);
		break;
	case (0x8c>>2): /* LD.Q */
		error = misaligned_load(regs, opcode, 1, 3, 0);
		break;

	case (0xa4>>2): /* ST.W */
		error = misaligned_store(regs, opcode, 1, 1);
		break;
	case (0xa8>>2): /* ST.L */
		error = misaligned_store(regs, opcode, 1, 2);
		break;
	case (0xac>>2): /* ST.Q */
		error = misaligned_store(regs, opcode, 1, 3);
		break;

	case (0x40>>2): /* indexed loads */
		switch (minor) {
		case 0x1: /* LDX.W */
			error = misaligned_load(regs, opcode, 0, 1, 1);
			break;
		case 0x5: /* LDX.UW */
			error = misaligned_load(regs, opcode, 0, 1, 0);
			break;
		case 0x2: /* LDX.L */
			error = misaligned_load(regs, opcode, 0, 2, 1);
			break;
		case 0x3: /* LDX.Q */
			error = misaligned_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x60>>2): /* indexed stores */
		switch (minor) {
		case 0x1: /* STX.W */
			error = misaligned_store(regs, opcode, 0, 1);
			break;
		case 0x2: /* STX.L */
			error = misaligned_store(regs, opcode, 0, 2);
			break;
		case 0x3: /* STX.Q */
			error = misaligned_store(regs, opcode, 0, 3);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0x94>>2): /* FLD.S */
		error = misaligned_fpu_load(regs, opcode, 1, 2, 0);
		break;
	case (0x98>>2): /* FLD.P */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 1);
		break;
	case (0x9c>>2): /* FLD.D */
		error = misaligned_fpu_load(regs, opcode, 1, 3, 0);
		break;

	case (0x1c>>2): /* floating indexed loads */
		switch (minor) {
		case 0x8: /* FLDX.S */
			error = misaligned_fpu_load(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FLDX.P */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FLDX.D */
			error = misaligned_fpu_load(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	case (0xb4>>2): /* FST.S */
		error = misaligned_fpu_store(regs, opcode, 1, 2, 0);
		break;
	case (0xb8>>2): /* FST.P */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 1);
		break;
	case (0xbc>>2): /* FST.D */
		error = misaligned_fpu_store(regs, opcode, 1, 3, 0);
		break;

	case (0x3c>>2): /* floating indexed stores */
		switch (minor) {
		case 0x8: /* FSTX.S */
			error = misaligned_fpu_store(regs, opcode, 0, 2, 0);
			break;
		case 0xd: /* FSTX.P */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 1);
			break;
		case 0x9: /* FSTX.D */
			error = misaligned_fpu_store(regs, opcode, 0, 3, 0);
			break;
		default:
			error = -1;
			break;
		}
		break;

	default:
		/* Fault */
		error = -1;
		break;
	}

	if (error < 0) {
		return error;
	} else {
		regs->pc += 4; /* Skip the instruction that's just been emulated */
		return 0;
	}
}
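
/*
 * Illustrative trace (hypothetical instruction): a misaligned LD.L has
 * major opcode 0x88 >> 2 == 0x22, so misaligned_fixup() above dispatches
 * to misaligned_load(regs, opcode, 1, 2, 1), i.e. displacement addressing,
 * a 4-byte access with sign extension of the loaded value, and then
 * advances regs->pc past the emulated instruction on success.
 */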

static void do_unhandled_exception(int signr, char *str, unsigned long error,
				   struct pt_regs *regs)
{
	if (user_mode(regs))
		force_sig(signr, current);

	die_if_no_fixup(str, regs, error);
}

#define DO_ERROR(signr, str, name) \
asmlinkage void do_##name(unsigned long error_code, struct pt_regs *regs) \
{ \
	do_unhandled_exception(signr, str, error_code, regs); \
}

DO_ERROR(SIGILL, "illegal slot instruction", illegal_slot_inst)
DO_ERROR(SIGSEGV, "address error (exec)", address_error_exec)

#if defined(CONFIG_SH64_ID2815_WORKAROUND)

#define OPCODE_INVALID		0
#define OPCODE_USER_VALID	1
#define OPCODE_PRIV_VALID	2

/* getcon/putcon - requires checking which control register is referenced. */
#define OPCODE_CTRL_REG		3

/* Table of valid opcodes for SHmedia mode.
   Form a 10-bit value by concatenating the major/minor opcodes i.e.
   opcode[31:26,19:16].  The 6 MSBs of this value index into the following
   array.  The 4 LSBs select the bit-pair in the entry (bits 1:0 correspond to
   LSBs==4'b0000 etc). */
static unsigned long shmedia_opcode_table[64] = {
	0x55554044,0x54445055,0x15141514,0x14541414,0x00000000,0x10001000,0x01110055,0x04050015,
	0x00000444,0xc0000000,0x44545515,0x40405555,0x55550015,0x10005555,0x55555505,0x04050000,
	0x00000555,0x00000404,0x00040445,0x15151414,0x00000000,0x00000000,0x00000000,0x00000000,
	0x00000055,0x40404444,0x00000404,0xc0009495,0x00000000,0x00000000,0x00000000,0x00000000,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x80005050,0x04005055,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,0x55555555,
	0x81055554,0x00000404,0x55555555,0x55555555,0x00000000,0x00000000,0x00000000,0x00000000
};
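
/*
 * Worked example (hypothetical opcode fields): for major == 0x21 and
 * minor == 0x3, do_reserved_inst() below uses index 0x21 and shift
 * 0x3 << 1 == 6, so the validity state of that opcode is
 * (shmedia_opcode_table[0x21] >> 6) & 0x3.
 */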

/* Workaround SH5-101 cut2 silicon defect #2815 :
   in some situations, inter-mode branches from SHcompact -> SHmedia
   which should take ITLBMISS or EXECPROT exceptions at the target
   falsely take RESINST at the target instead. */
void do_reserved_inst(unsigned long error_code, struct pt_regs *regs)
{
	insn_size_t opcode = 0x6ff4fff0; /* guaranteed reserved opcode */
	unsigned long pc, aligned_pc;
	unsigned long index, shift;
	unsigned long major, minor, combined;
	unsigned long reserved_field;
	int opcode_state;
	int get_user_error;
	int signr = SIGILL;
	char *exception_name = "reserved_instruction";

	pc = regs->pc;

	/* SHcompact is not handled */
	if (unlikely((pc & 3) == 0))
		goto out;

	/* SHmedia : check for defect.  This requires executable vmas
	   to be readable too. */
	aligned_pc = pc & ~3;
	if (!access_ok(VERIFY_READ, aligned_pc, sizeof(insn_size_t)))
		get_user_error = -EFAULT;
	else
		get_user_error = __get_user(opcode, (insn_size_t *)aligned_pc);

	if (get_user_error < 0) {
		/*
		 * Error trying to read opcode.  This typically means a
		 * real fault, not a RESINST any more.  So change the
		 * codes.
		 */
		exception_name = "address error (exec)";
		signr = SIGSEGV;
		goto out;
	}

	/* These bits are currently reserved as zero in all valid opcodes */
	reserved_field = opcode & 0xf;
	if (unlikely(reserved_field))
		goto out;	/* invalid opcode */

	major = (opcode >> 26) & 0x3f;
	minor = (opcode >> 16) & 0xf;
	combined = (major << 4) | minor;
	index = major;
	shift = minor << 1;
	opcode_state = (shmedia_opcode_table[index] >> shift) & 0x3;

	switch (opcode_state) {
	case OPCODE_INVALID:
		/* Trap. */
		break;
	case OPCODE_USER_VALID:
		/*
		 * Restart the instruction: the branch to the instruction
		 * will now be from an RTE not from SHcompact so the
		 * silicon defect won't be triggered.
		 */
		return;
	case OPCODE_PRIV_VALID:
		if (!user_mode(regs)) {
			/*
			 * Should only ever get here if a module has
			 * SHcompact code inside it.  If so, the same fix
			 * up is needed.
			 */
			return; /* same reason */
		}

		/*
		 * Otherwise, user mode trying to execute a privileged
		 * instruction - fall through to trap.
		 */
		break;
	case OPCODE_CTRL_REG:
		/* If in privileged mode, return as above. */
		if (!user_mode(regs))
			return;

		/* In user mode ... */
		if (combined == 0x9f) { /* GETCON */
			unsigned long regno = (opcode >> 20) & 0x3f;

			if (regno >= 62)
				return;

			/* reserved/privileged control register => trap */
		} else if (combined == 0x1bf) { /* PUTCON */
			unsigned long regno = (opcode >> 4) & 0x3f;

			if (regno >= 62)
				return;

			/* reserved/privileged control register => trap */
		}
		break;
	default:
		/* Fall through to trap. */
		break;
	}

out:
	do_unhandled_exception(signr, exception_name, error_code, regs);
}

#else /* CONFIG_SH64_ID2815_WORKAROUND */

/* If the workaround isn't needed, this is just a straightforward reserved
   instruction */
DO_ERROR(SIGILL, "reserved instruction", reserved_inst)

#endif /* CONFIG_SH64_ID2815_WORKAROUND */

/* Called with interrupts disabled */
asmlinkage void do_exception_error(unsigned long ex, struct pt_regs *regs)
{
	die_if_kernel("exception", regs, ex);
}

asmlinkage int do_unknown_trapa(unsigned long scId, struct pt_regs *regs)
{
	/* Syscall debug */
	printk("System call ID error: [0x1#args:8 #syscall:16 0x%lx]\n", scId);

	die_if_kernel("unknown trapa", regs, scId);

	return -ENOSYS;
}

/* Implement misaligned load/store handling for kernel (and optionally for user
   mode too).  Limitation : only SHmedia mode code is handled - there is no
   handling at all for misaligned accesses occurring in SHcompact code yet. */
asmlinkage void do_address_error_load(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0)
		do_unhandled_exception(SIGSEGV, "address error(load)",
				       error_code, regs);
}

asmlinkage void do_address_error_store(unsigned long error_code, struct pt_regs *regs)
{
	if (misaligned_fixup(regs) < 0)
		do_unhandled_exception(SIGSEGV, "address error(store)",
				       error_code, regs);
}

asmlinkage void do_debug_interrupt(unsigned long code, struct pt_regs *regs)
{
	u64 peek_real_address_q(u64 addr);
	u64 poke_real_address_q(u64 addr, u64 val);
	unsigned long long DM_EXP_CAUSE_PHY = 0x0c100010;
	unsigned long long exp_cause;

	/* It's not worth ioremapping the debug module registers for the amount
	   of access we make to them - just go direct to their physical
	   addresses. */
	exp_cause = peek_real_address_q(DM_EXP_CAUSE_PHY);
	if (exp_cause & ~4)
		printk("DM.EXP_CAUSE had unexpected bits set (=%08lx)\n",
			(unsigned long)(exp_cause & 0xffffffff));
	show_state();
	/* Clear all DEBUGINT causes */
	poke_real_address_q(DM_EXP_CAUSE_PHY, 0x0);
}

void per_cpu_trap_init(void)
{
	/* Nothing to do for now, VBR initialization later. */
}