/*
 * Meta exception handling.
 *
 * Copyright (C) 2005,2006,2007,2008,2009,2012 Imagination Technologies Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/preempt.h>
#include <linux/ptrace.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/kdebug.h>
#include <linux/kexec.h>
#include <linux/unistd.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/syscalls.h>

#include <asm/bug.h>
#include <asm/core_reg.h>
#include <asm/irqflags.h>
#include <asm/siginfo.h>
#include <asm/traps.h>
#include <asm/hwthread.h>
#include <asm/setup.h>
#include <asm/switch.h>
#include <asm/user_gateway.h>
#include <asm/syscall.h>
#include <asm/syscalls.h>

/* Passing syscall arguments as long long is quicker. */
typedef unsigned int (*LPSYSCALL) (unsigned long long,
                                   unsigned long long,
                                   unsigned long long);
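/*
 * Note: each unsigned long long spans a pair of adjacent 32-bit data
 * registers in the saved context, so three of them are enough to carry
 * all six possible syscall arguments; see the 64-bit loads in
 * switch1_handler() below.
 */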
/*
 * Users of LNKSET should compare the bus error bits obtained from DEFR
 * against TXDEFR_LNKSET_SUCCESS only, as the failure code will vary
 * between different core revisions.
 */
#define TXDEFR_LNKSET_SUCCESS	0x02000000
#define TXDEFR_LNKSET_FAILURE	0x04000000
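/*
 * Sketch of the intended usage (illustrative only; the real checks live
 * in head_end() and tail_end_sys() below):
 *
 *	txdefr = __core_reg_get(TXDEFR) & TXDEFR_BUS_STATE_BITS;
 *	if (txdefr & TXDEFR_LNKSET_SUCCESS)
 *		... the LNKSET store completed ...
 *	else
 *		... treat as failure; never test TXDEFR_LNKSET_FAILURE
 *		    directly, since the failure encoding varies ...
 */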
/*
 * Our global TBI handle.  Initialised from setup.c/setup_arch.
 */
DECLARE_PER_CPU(PTBI, pTBI);

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(unsigned int, trigger_mask);
#else
unsigned int global_trigger_mask;
EXPORT_SYMBOL(global_trigger_mask);
#endif

unsigned long per_cpu__stack_save[NR_CPUS];

static const char * const trap_names[] = {
        [TBIXXF_SIGNUM_IIF] = "Illegal instruction fault",
        [TBIXXF_SIGNUM_PGF] = "Privilege violation",
        [TBIXXF_SIGNUM_DHF] = "Unaligned data access fault",
        [TBIXXF_SIGNUM_IGF] = "Code fetch general read failure",
        [TBIXXF_SIGNUM_DGF] = "Data access general read/write fault",
        [TBIXXF_SIGNUM_IPF] = "Code fetch page fault",
        [TBIXXF_SIGNUM_DPF] = "Data access page fault",
        [TBIXXF_SIGNUM_IHF] = "Instruction breakpoint",
        [TBIXXF_SIGNUM_DWF] = "Read-only data access fault",
};

const char *trap_name(int trapno)
{
        if (trapno >= 0 && trapno < ARRAY_SIZE(trap_names)
                        && trap_names[trapno])
                return trap_names[trapno];
        return "Unknown fault";
}

static DEFINE_SPINLOCK(die_lock);

void __noreturn die(const char *str, struct pt_regs *regs,
                    long err, unsigned long addr)
{
        static int die_counter;

        oops_enter();

        spin_lock_irq(&die_lock);
        console_verbose();
        bust_spinlocks(1);
        pr_err("%s: err %04lx (%s) addr %08lx [#%d]\n", str, err & 0xffff,
               trap_name(err & 0xffff), addr, ++die_counter);

        print_modules();
        show_regs(regs);

        pr_err("Process: %s (pid: %d, stack limit = %p)\n", current->comm,
               task_pid_nr(current), task_stack_page(current) + THREAD_SIZE);

        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        if (kexec_should_crash(current))
                crash_kexec(regs);

        if (in_interrupt())
                panic("Fatal exception in interrupt");

        if (panic_on_oops)
                panic("Fatal exception");

        spin_unlock_irq(&die_lock);
        oops_exit();
        do_exit(SIGSEGV);
}

#ifdef CONFIG_METAG_DSP
/*
 * The ECH encoding specifies the size of a DSPRAM as,
 *
 *	"slots" / 4
 *
 * A "slot" is the size of two DSPRAM bank entries; an entry from
 * DSPRAM bank A and an entry from DSPRAM bank B. One DSPRAM bank
 * entry is 4 bytes.
 */
#define SLOT_SZ	8
static inline unsigned int decode_dspram_size(unsigned int size)
{
        unsigned int _sz = size & 0x7f;

        return _sz * SLOT_SZ * 4;
}
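/*
 * Worked example: an encoded size field of 0x04 means 4 * 4 = 16 slots,
 * so decode_dspram_size(0x04) = 4 * SLOT_SZ * 4 = 128 bytes of DSPRAM
 * (16 slots of 8 bytes each).
 */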
static void dspram_save(struct meta_ext_context *dsp_ctx,
                        unsigned int ramA_sz, unsigned int ramB_sz)
{
        unsigned int ram_sz[2];
        int i;

        ram_sz[0] = ramA_sz;
        ram_sz[1] = ramB_sz;

        for (i = 0; i < 2; i++) {
                if (ram_sz[i] != 0) {
                        unsigned int sz;

                        if (i == 0)
                                sz = decode_dspram_size(ram_sz[i] >> 8);
                        else
                                sz = decode_dspram_size(ram_sz[i]);

                        if (dsp_ctx->ram[i] == NULL) {
                                dsp_ctx->ram[i] = kmalloc(sz, GFP_KERNEL);

                                if (dsp_ctx->ram[i] == NULL)
                                        panic("couldn't save DSP context");
                        } else {
                                if (ram_sz[i] > dsp_ctx->ram_sz[i]) {
                                        kfree(dsp_ctx->ram[i]);

                                        dsp_ctx->ram[i] = kmalloc(sz,
                                                                  GFP_KERNEL);

                                        if (dsp_ctx->ram[i] == NULL)
                                                panic("couldn't save DSP context");
                                }
                        }

                        if (i == 0)
                                __TBIDspramSaveA(ram_sz[i], dsp_ctx->ram[i]);
                        else
                                __TBIDspramSaveB(ram_sz[i], dsp_ctx->ram[i]);

                        dsp_ctx->ram_sz[i] = ram_sz[i];
                }
        }
}
#endif /* CONFIG_METAG_DSP */

/*
 * Allow interrupts to be nested and save any "extended" register
 * context state, e.g. DSP regs and RAMs.
 */
static void nest_interrupts(TBIRES State, unsigned long mask)
{
#ifdef CONFIG_METAG_DSP
        struct meta_ext_context *dsp_ctx;
        unsigned int D0_8;

        /*
         * D0.8 may contain an ECH encoding. The upper 16 bits
         * tell us what DSP resources the current process is
         * using. OR the bits into the SaveMask so that
         * __TBINestInts() knows what resources to save as
         * part of this context.
         *
         * Don't save the context if we're nesting interrupts in the
         * kernel because the kernel doesn't use DSP hardware.
         */
        D0_8 = __core_reg_get(D0.8);

        if (D0_8 && (State.Sig.SaveMask & TBICTX_PRIV_BIT)) {
                State.Sig.SaveMask |= (D0_8 >> 16);

                dsp_ctx = current->thread.dsp_context;
                if (dsp_ctx == NULL) {
                        dsp_ctx = kzalloc(sizeof(*dsp_ctx), GFP_KERNEL);
                        if (dsp_ctx == NULL)
                                panic("couldn't save DSP context: ENOMEM");

                        current->thread.dsp_context = dsp_ctx;
                }

                current->thread.user_flags |= (D0_8 & 0xffff0000);
                __TBINestInts(State, &dsp_ctx->regs, mask);
                dspram_save(dsp_ctx, D0_8 & 0x7f00, D0_8 & 0x007f);
        } else
                __TBINestInts(State, NULL, mask);
#else
        __TBINestInts(State, NULL, mask);
#endif
}

void head_end(TBIRES State, unsigned long mask)
{
        unsigned int savemask = (unsigned short)State.Sig.SaveMask;
        unsigned int ctx_savemask = (unsigned short)State.Sig.pCtx->SaveMask;

        if (savemask & TBICTX_PRIV_BIT) {
                ctx_savemask |= TBICTX_PRIV_BIT;
                current->thread.user_flags = savemask;
        }

        /* Always undo the sleep bit */
        ctx_savemask &= ~TBICTX_WAIT_BIT;

        /* Always save the catch buffer and RD pipe if they are dirty */
        savemask |= TBICTX_XCBF_BIT;

        /* Only save the catch and RD if we have not already done so.
         * Note - the RD bits are in the pCtx only, and not in the
         * State.SaveMask.
         */
        if ((savemask & TBICTX_CBUF_BIT) ||
            (ctx_savemask & TBICTX_CBRP_BIT)) {
                /* Have we already saved the buffers though?
                 * - See TestTrack 5071 */
                if (ctx_savemask & TBICTX_XCBF_BIT) {
                        /* Strip off the bits so the call to __TBINestInts
                         * won't save the buffers again. */
                        savemask &= ~TBICTX_CBUF_BIT;
                        ctx_savemask &= ~TBICTX_CBRP_BIT;
                }
        }

#ifdef CONFIG_METAG_META21
        {
                unsigned int depth, txdefr;

                /*
                 * Save TXDEFR state.
                 *
                 * The process may have been interrupted after a LNKSET, but
                 * before it could read the DEFR state, so we mustn't lose that
                 * state or it could end up retrying an atomic operation that
                 * succeeded.
                 *
                 * All interrupts are disabled at this point so we
                 * don't need to perform any locking. We must do this
                 * dance before we use LNKGET or LNKSET.
                 */
                BUG_ON(current->thread.int_depth > HARDIRQ_BITS);

                depth = current->thread.int_depth++;

                txdefr = __core_reg_get(TXDEFR);

                txdefr &= TXDEFR_BUS_STATE_BITS;
                if (txdefr & TXDEFR_LNKSET_SUCCESS)
                        current->thread.txdefr_failure &= ~(1 << depth);
                else
                        current->thread.txdefr_failure |= (1 << depth);
        }
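
        /*
         * Bookkeeping example: on the second nested entry, int_depth is 1
         * on entry, so bit 1 of txdefr_failure now records whether the
         * interrupted LNKSET had succeeded; tail_end_sys() tests the same
         * bit after decrementing int_depth back to 1.
         */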
#endif

        State.Sig.SaveMask = savemask;
        State.Sig.pCtx->SaveMask = ctx_savemask;

        nest_interrupts(State, mask);

#ifdef CONFIG_METAG_POISON_CATCH_BUFFERS
        /* Poison the catch registers.  This shows up any mistakes we have
         * made in their handling MUCH quicker.
         */
        __core_reg_set(TXCATCH0, 0x87650021);
        __core_reg_set(TXCATCH1, 0x87654322);
        __core_reg_set(TXCATCH2, 0x87654323);
        __core_reg_set(TXCATCH3, 0x87654324);
#endif /* CONFIG_METAG_POISON_CATCH_BUFFERS */
}

TBIRES tail_end_sys(TBIRES State, int syscall, int *restart)
{
        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
        unsigned long flags;

        local_irq_disable();

        if (user_mode(regs)) {
                flags = current_thread_info()->flags;
                if (flags & _TIF_WORK_MASK &&
                    do_work_pending(regs, flags, syscall)) {
                        *restart = 1;
                        return State;
                }

#ifdef CONFIG_METAG_FPU
                if (current->thread.fpu_context &&
                    current->thread.fpu_context->needs_restore) {
                        __TBICtxFPURestore(State, current->thread.fpu_context);
                        /*
                         * Clearing this bit ensures the FP unit is not made
                         * active again unless it is used.
                         */
                        State.Sig.SaveMask &= ~TBICTX_FPAC_BIT;
                        current->thread.fpu_context->needs_restore = false;
                }
                State.Sig.TrigMask |= TBI_TRIG_BIT(TBID_SIGNUM_DFR);
#endif
        }

        /* TBI will turn interrupts back on at some point. */
        if (!irqs_disabled_flags((unsigned long)State.Sig.TrigMask))
                trace_hardirqs_on();

#ifdef CONFIG_METAG_DSP
        /*
         * If we previously saved an extended context then restore it
         * now. Otherwise, clear D0.8 because this process is not
         * using DSP hardware.
         */
        if (State.Sig.pCtx->SaveMask & TBICTX_XEXT_BIT) {
                unsigned int D0_8;
                struct meta_ext_context *dsp_ctx = current->thread.dsp_context;

                /* Make sure we're going to return to userland. */
                BUG_ON(current->thread.int_depth != 1);

                if (dsp_ctx->ram_sz[0] > 0)
                        __TBIDspramRestoreA(dsp_ctx->ram_sz[0],
                                            dsp_ctx->ram[0]);
                if (dsp_ctx->ram_sz[1] > 0)
                        __TBIDspramRestoreB(dsp_ctx->ram_sz[1],
                                            dsp_ctx->ram[1]);

                State.Sig.SaveMask |= State.Sig.pCtx->SaveMask;
                __TBICtxRestore(State, current->thread.dsp_context);

                D0_8 = __core_reg_get(D0.8);
                D0_8 |= current->thread.user_flags & 0xffff0000;
                D0_8 |= (dsp_ctx->ram_sz[1] | dsp_ctx->ram_sz[0]) & 0xffff;
                __core_reg_set(D0.8, D0_8);
        } else
                __core_reg_set(D0.8, 0);
#endif /* CONFIG_METAG_DSP */

#ifdef CONFIG_METAG_META21
        {
                unsigned int depth, txdefr;

                /*
                 * If there hasn't been a LNKSET since the last LNKGET then the
                 * link flag will be set, causing the next LNKSET to succeed if
                 * the addresses match. The two LNK operations may not be a pair
                 * (e.g. see atomic_read()), so the LNKSET should fail.
                 * We use a conditional-never LNKSET to clear the link flag
                 * without side effects.
                 */
                asm volatile("LNKSETDNV [D0Re0],D0Re0");

                depth = --current->thread.int_depth;

                BUG_ON(user_mode(regs) && depth);

                txdefr = __core_reg_get(TXDEFR);

                txdefr &= ~TXDEFR_BUS_STATE_BITS;

                /* Do we need to restore a failure code into TXDEFR? */
                if (current->thread.txdefr_failure & (1 << depth))
                        txdefr |= (TXDEFR_LNKSET_FAILURE | TXDEFR_BUS_TRIG_BIT);
                else
                        txdefr |= (TXDEFR_LNKSET_SUCCESS | TXDEFR_BUS_TRIG_BIT);

                __core_reg_set(TXDEFR, txdefr);
        }
#endif

        return State;
}

#ifdef CONFIG_SMP
/*
 * If we took an interrupt in the middle of __kuser_get_tls then we need
 * to rewind the PC to the start of the function in case the process
 * gets migrated to another thread (SMP only) and it reads the wrong tls
 * data.
 */
static inline void _restart_critical_section(TBIRES State)
{
        unsigned long get_tls_start;
        unsigned long get_tls_end;

        get_tls_start = (unsigned long)__kuser_get_tls -
                (unsigned long)&__user_gateway_start;

        get_tls_start += USER_GATEWAY_PAGE;

        get_tls_end = (unsigned long)__kuser_get_tls_end -
                (unsigned long)&__user_gateway_start;

        get_tls_end += USER_GATEWAY_PAGE;

        if ((State.Sig.pCtx->CurrPC >= get_tls_start) &&
            (State.Sig.pCtx->CurrPC < get_tls_end))
                State.Sig.pCtx->CurrPC = get_tls_start;
}
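
/*
 * Address arithmetic, spelled out: the gateway routines are linked into
 * the kernel but executed by userland from the page mapped at
 * USER_GATEWAY_PAGE, so a routine at offset N from __user_gateway_start
 * appears to the interrupted process at USER_GATEWAY_PAGE + N.  The
 * saved PC is therefore compared against the translated range, not the
 * kernel link addresses.
 */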
#else
/*
 * If we took an interrupt in the middle of
 * __kuser_cmpxchg then we need to rewind the PC to the
 * start of the function.
 */
static inline void _restart_critical_section(TBIRES State)
{
        unsigned long cmpxchg_start;
        unsigned long cmpxchg_end;

        cmpxchg_start = (unsigned long)__kuser_cmpxchg -
                (unsigned long)&__user_gateway_start;

        cmpxchg_start += USER_GATEWAY_PAGE;

        cmpxchg_end = (unsigned long)__kuser_cmpxchg_end -
                (unsigned long)&__user_gateway_start;

        cmpxchg_end += USER_GATEWAY_PAGE;

        if ((State.Sig.pCtx->CurrPC >= cmpxchg_start) &&
            (State.Sig.pCtx->CurrPC < cmpxchg_end))
                State.Sig.pCtx->CurrPC = cmpxchg_start;
}
#endif

/* Used by kick_handler() */
void restart_critical_section(TBIRES State)
{
        _restart_critical_section(State);
}

TBIRES trigger_handler(TBIRES State, int SigNum, int Triggers, int Inst,
                       PTBI pTBI)
{
        head_end(State, ~INTS_OFF_MASK);

        /* If we interrupted user code handle any critical sections. */
        if (State.Sig.SaveMask & TBICTX_PRIV_BIT)
                _restart_critical_section(State);

        trace_hardirqs_off();

        do_IRQ(SigNum, (struct pt_regs *)State.Sig.pCtx);

        return tail_end(State);
}
static unsigned int load_fault(PTBICTXEXTCB0 pbuf)
{
        return pbuf->CBFlags & TXCATCH0_READ_BIT;
}

static unsigned long fault_address(PTBICTXEXTCB0 pbuf)
{
        return pbuf->CBAddr;
}

static void unhandled_fault(struct pt_regs *regs, unsigned long addr,
                            int signo, int code, int trapno)
{
        if (user_mode(regs)) {
                siginfo_t info;

                if (show_unhandled_signals && unhandled_signal(current, signo)
                    && printk_ratelimit()) {

                        pr_info("pid %d unhandled fault: pc 0x%08x, addr 0x%08lx, trap %d (%s)\n",
                                current->pid, regs->ctx.CurrPC, addr,
                                trapno, trap_name(trapno));
                        print_vma_addr(" in ", regs->ctx.CurrPC);
                        print_vma_addr(" rtp in ", regs->ctx.DX[4].U1);
                        printk("\n");
                        show_regs(regs);
                }

                info.si_signo = signo;
                info.si_errno = 0;
                info.si_code = code;
                info.si_addr = (__force void __user *)addr;
                info.si_trapno = trapno;
                force_sig_info(signo, &info, current);
        } else {
                die("Oops", regs, trapno, addr);
        }
}

static int handle_data_fault(PTBICTXEXTCB0 pcbuf, struct pt_regs *regs,
                             unsigned int data_address, int trapno)
{
        int ret;

        ret = do_page_fault(regs, data_address, !load_fault(pcbuf), trapno);

        return ret;
}

static unsigned long get_inst_fault_address(struct pt_regs *regs)
{
        return regs->ctx.CurrPC;
}

TBIRES fault_handler(TBIRES State, int SigNum, int Triggers,
                     int Inst, PTBI pTBI)
{
        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
        PTBICTXEXTCB0 pcbuf = (PTBICTXEXTCB0)&regs->extcb0;
        unsigned long data_address;

        head_end(State, ~INTS_OFF_MASK);

        /* Hardware breakpoint or data watch */
        if ((SigNum == TBIXXF_SIGNUM_IHF) ||
            ((SigNum == TBIXXF_SIGNUM_DHF) &&
             (pcbuf[0].CBFlags & (TXCATCH0_WATCH1_BIT |
                                  TXCATCH0_WATCH0_BIT)))) {
                State = __TBIUnExpXXX(State, SigNum, Triggers, Inst,
                                      pTBI);
                return tail_end(State);
        }

        local_irq_enable();

        data_address = fault_address(pcbuf);

        switch (SigNum) {
        case TBIXXF_SIGNUM_IGF:
                /* 1st-level entry invalid (instruction fetch) */
        case TBIXXF_SIGNUM_IPF: {
                /* 2nd-level entry invalid (instruction fetch) */
                unsigned long addr = get_inst_fault_address(regs);
                do_page_fault(regs, addr, 0, SigNum);
                break;
        }

        case TBIXXF_SIGNUM_DGF:
                /* 1st-level entry invalid (data access) */
        case TBIXXF_SIGNUM_DPF:
                /* 2nd-level entry invalid (data access) */
        case TBIXXF_SIGNUM_DWF:
                /* Write to read only page */
                handle_data_fault(pcbuf, regs, data_address, SigNum);
                break;

        case TBIXXF_SIGNUM_IIF:
                /* Illegal instruction */
                unhandled_fault(regs, regs->ctx.CurrPC, SIGILL, ILL_ILLOPC,
                                SigNum);
                break;

        case TBIXXF_SIGNUM_DHF:
                /* Unaligned access */
                unhandled_fault(regs, data_address, SIGBUS, BUS_ADRALN,
                                SigNum);
                break;
        case TBIXXF_SIGNUM_PGF:
                /* Privilege violation */
                unhandled_fault(regs, data_address, SIGSEGV, SEGV_ACCERR,
                                SigNum);
                break;
        default:
                BUG();
                break;
        }

        return tail_end(State);
}
static bool switch_is_syscall(unsigned int inst)
{
        return inst == __METAG_SW_ENCODING(SYS);
}

static bool switch_is_legacy_syscall(unsigned int inst)
{
        return inst == __METAG_SW_ENCODING(SYS_LEGACY);
}

static inline void step_over_switch(struct pt_regs *regs, unsigned int inst)
{
        regs->ctx.CurrPC += 4;
}

static inline int test_syscall_work(void)
{
        return current_thread_info()->flags & _TIF_WORK_SYSCALL_MASK;
}

TBIRES switch1_handler(TBIRES State, int SigNum, int Triggers,
                       int Inst, PTBI pTBI)
{
        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
        unsigned int sysnumber;
        unsigned long long a1_a2, a3_a4, a5_a6;
        LPSYSCALL syscall_entry;
        int restart;

        head_end(State, ~INTS_OFF_MASK);

        /*
         * If this is not a syscall SWITCH it could be a breakpoint.
         */
        if (!switch_is_syscall(Inst)) {
                /*
                 * Alert the user if they're trying to use legacy system
                 * calls. This suggests they need to update their C
                 * library and build against up to date kernel headers.
                 */
                if (switch_is_legacy_syscall(Inst))
                        pr_warn_once("WARNING: A legacy syscall was made. Your userland needs updating.\n");
                /*
                 * We don't know how to handle the SWITCH and cannot
                 * safely ignore it, so treat all unknown switches
                 * (including breakpoints) as traps.
                 */
                force_sig(SIGTRAP, current);
                return tail_end(State);
        }

        local_irq_enable();

restart_syscall:
        restart = 0;
        sysnumber = regs->ctx.DX[0].U1;

        if (test_syscall_work())
                sysnumber = syscall_trace_enter(regs);

        /* Skip over the SWITCH instruction - or you just get 'stuck' on it! */
        step_over_switch(regs, Inst);

        if (sysnumber >= __NR_syscalls) {
                pr_debug("unknown syscall number: %d\n", sysnumber);
                syscall_entry = (LPSYSCALL) sys_ni_syscall;
        } else {
                syscall_entry = (LPSYSCALL) sys_call_table[sysnumber];
        }

        /* Use 64bit loads for speed. */
        a5_a6 = *(unsigned long long *)&regs->ctx.DX[1];
        a3_a4 = *(unsigned long long *)&regs->ctx.DX[2];
        a1_a2 = *(unsigned long long *)&regs->ctx.DX[3];
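        /*
         * Register mapping (from the loads above): DX[3] carries syscall
         * arguments 1 and 2, DX[2] arguments 3 and 4, DX[1] arguments 5
         * and 6, so each 64-bit load moves a pair of 32-bit arguments in
         * a single access.
         */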
        /* here is the actual call to the syscall handler functions */
        regs->ctx.DX[0].U0 = syscall_entry(a1_a2, a3_a4, a5_a6);

        if (test_syscall_work())
                syscall_trace_leave(regs);

        State = tail_end_sys(State, sysnumber, &restart);
        /* Handlerless restarts shouldn't go via userland */
        if (restart)
                goto restart_syscall;
        return State;
}

TBIRES switchx_handler(TBIRES State, int SigNum, int Triggers,
                       int Inst, PTBI pTBI)
{
        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;

        /*
         * This can be caused by any user process simply executing an unusual
         * SWITCH instruction. If there's no DA, __TBIUnExpXXX will cause the
         * thread to stop, so signal a SIGTRAP instead.
         */
        head_end(State, ~INTS_OFF_MASK);
        if (user_mode(regs))
                force_sig(SIGTRAP, current);
        else
                State = __TBIUnExpXXX(State, SigNum, Triggers, Inst, pTBI);
        return tail_end(State);
}

#ifdef CONFIG_METAG_META21
TBIRES fpe_handler(TBIRES State, int SigNum, int Triggers, int Inst, PTBI pTBI)
{
        struct pt_regs *regs = (struct pt_regs *)State.Sig.pCtx;
        unsigned int error_state = Triggers;
        siginfo_t info;

        head_end(State, ~INTS_OFF_MASK);

        local_irq_enable();

        info.si_signo = SIGFPE;

        if (error_state & TXSTAT_FPE_INVALID_BIT)
                info.si_code = FPE_FLTINV;
        else if (error_state & TXSTAT_FPE_DIVBYZERO_BIT)
                info.si_code = FPE_FLTDIV;
        else if (error_state & TXSTAT_FPE_OVERFLOW_BIT)
                info.si_code = FPE_FLTOVF;
        else if (error_state & TXSTAT_FPE_UNDERFLOW_BIT)
                info.si_code = FPE_FLTUND;
        else if (error_state & TXSTAT_FPE_INEXACT_BIT)
                info.si_code = FPE_FLTRES;
        else
                info.si_code = 0;
        info.si_errno = 0;
        info.si_addr = (__force void __user *)regs->ctx.CurrPC;
        force_sig_info(SIGFPE, &info, current);

        return tail_end(State);
}
#endif

#ifdef CONFIG_METAG_SUSPEND_MEM
struct traps_context {
        PTBIAPIFN fnSigs[TBID_SIGNUM_MAX + 1];
};

static struct traps_context *metag_traps_context;

int traps_save_context(void)
{
        unsigned long cpu = smp_processor_id();
        PTBI _pTBI = per_cpu(pTBI, cpu);
        struct traps_context *context;

        context = kzalloc(sizeof(*context), GFP_ATOMIC);
        if (!context)
                return -ENOMEM;

        memcpy(context->fnSigs, (void *)_pTBI->fnSigs, sizeof(context->fnSigs));

        metag_traps_context = context;
        return 0;
}

int traps_restore_context(void)
{
        unsigned long cpu = smp_processor_id();
        PTBI _pTBI = per_cpu(pTBI, cpu);
        struct traps_context *context = metag_traps_context;

        metag_traps_context = NULL;

        memcpy((void *)_pTBI->fnSigs, context->fnSigs, sizeof(context->fnSigs));

        kfree(context);
        return 0;
}
#endif

#ifdef CONFIG_SMP
static inline unsigned int _get_trigger_mask(void)
{
        unsigned long cpu = smp_processor_id();
        return per_cpu(trigger_mask, cpu);
}

unsigned int get_trigger_mask(void)
{
        return _get_trigger_mask();
}
EXPORT_SYMBOL(get_trigger_mask);

static void set_trigger_mask(unsigned int mask)
{
        unsigned long cpu = smp_processor_id();
        per_cpu(trigger_mask, cpu) = mask;
}

void arch_local_irq_enable(void)
{
        preempt_disable();
        arch_local_irq_restore(_get_trigger_mask());
        preempt_enable_no_resched();
}
EXPORT_SYMBOL(arch_local_irq_enable);
#else
static void set_trigger_mask(unsigned int mask)
{
        global_trigger_mask = mask;
}
#endif

void per_cpu_trap_init(unsigned long cpu)
{
        TBIRES int_context;
        unsigned int thread = cpu_2_hwthread_id[cpu];

        set_trigger_mask(TBI_INTS_INIT(thread) | /* interrupts */
                         TBI_TRIG_BIT(TBID_SIGNUM_LWK) | /* low level kick */
                         TBI_TRIG_BIT(TBID_SIGNUM_SW1));

        /* non-priv - use current stack */
        int_context.Sig.pCtx = NULL;

        /* Start with interrupts off */
        int_context.Sig.TrigMask = INTS_OFF_MASK;
        int_context.Sig.SaveMask = 0;

        /* And call __TBIASyncTrigger() */
        __TBIASyncTrigger(int_context);
}

void __init trap_init(void)
{
        unsigned long cpu = smp_processor_id();
        PTBI _pTBI = per_cpu(pTBI, cpu);

        _pTBI->fnSigs[TBID_SIGNUM_XXF] = fault_handler;
        _pTBI->fnSigs[TBID_SIGNUM_SW0] = switchx_handler;
        _pTBI->fnSigs[TBID_SIGNUM_SW1] = switch1_handler;
        _pTBI->fnSigs[TBID_SIGNUM_SW2] = switchx_handler;
        _pTBI->fnSigs[TBID_SIGNUM_SW3] = switchx_handler;
        _pTBI->fnSigs[TBID_SIGNUM_LWK] = kick_handler;

#ifdef CONFIG_METAG_META21
        _pTBI->fnSigs[TBID_SIGNUM_DFR] = __TBIHandleDFR;
        _pTBI->fnSigs[TBID_SIGNUM_FPE] = fpe_handler;
#endif

        per_cpu_trap_init(cpu);
}

void tbi_startup_interrupt(int irq)
{
        unsigned long cpu = smp_processor_id();
        PTBI _pTBI = per_cpu(pTBI, cpu);

        BUG_ON(irq > TBID_SIGNUM_MAX);

        /* For TR1 and TR2, the thread id is encoded in the irq number */
        if (irq >= TBID_SIGNUM_T10 && irq < TBID_SIGNUM_TR3)
                cpu = hwthread_id_2_cpu[(irq - TBID_SIGNUM_T10) % 4];
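        /*
         * Illustrative (assuming four hardware threads): an irq at offset
         * 5 from TBID_SIGNUM_T10 maps to hardware thread 5 % 4 = 1, whose
         * logical cpu is then looked up in hwthread_id_2_cpu[].
         */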
        set_trigger_mask(get_trigger_mask() | TBI_TRIG_BIT(irq));

        _pTBI->fnSigs[irq] = trigger_handler;
}

void tbi_shutdown_interrupt(int irq)
{
        unsigned long cpu = smp_processor_id();
        PTBI _pTBI = per_cpu(pTBI, cpu);

        BUG_ON(irq > TBID_SIGNUM_MAX);

        set_trigger_mask(get_trigger_mask() & ~TBI_TRIG_BIT(irq));

        _pTBI->fnSigs[irq] = __TBIUnExpXXX;
}

int ret_from_fork(TBIRES arg)
{
        struct task_struct *prev = arg.Switch.pPara;
        struct task_struct *tsk = current;
        struct pt_regs *regs = task_pt_regs(tsk);
        int (*fn)(void *);
        TBIRES Next;

        schedule_tail(prev);

        if (tsk->flags & PF_KTHREAD) {
                fn = (void *)regs->ctx.DX[4].U1;
                BUG_ON(!fn);

                fn((void *)regs->ctx.DX[3].U1);
        }

        if (test_syscall_work())
                syscall_trace_leave(regs);

        preempt_disable();

        Next.Sig.TrigMask = get_trigger_mask();
        Next.Sig.SaveMask = 0;
        Next.Sig.pCtx = &regs->ctx;

        set_gateway_tls(current->thread.tls_ptr);

        preempt_enable_no_resched();

        /* And interrupts should come back on when we resume the real usermode
         * code. Call __TBIASyncResume()
         */
        __TBIASyncResume(tail_end(Next));
        /* ASyncResume should NEVER return */
        BUG();
        return 0;
}

void show_trace(struct task_struct *tsk, unsigned long *sp,
                struct pt_regs *regs)
{
        unsigned long addr;
#ifdef CONFIG_FRAME_POINTER
        unsigned long fp, fpnew;
        unsigned long stack;
#endif

        if (regs && user_mode(regs))
                return;

        printk("\nCall trace: ");
#ifdef CONFIG_KALLSYMS
        printk("\n");
#endif

        if (!tsk)
                tsk = current;

#ifdef CONFIG_FRAME_POINTER
        if (regs) {
                print_ip_sym(regs->ctx.CurrPC);
                fp = regs->ctx.AX[1].U0;
        } else {
                fp = __core_reg_get(A0FrP);
        }

        /* detect when the frame pointer has been used for other purposes and
         * doesn't point to the stack (it may point completely elsewhere which
         * kstack_end may not detect).
         */
        stack = (unsigned long)task_stack_page(tsk);
        while (fp >= stack && fp + 8 <= stack + THREAD_SIZE) {
                addr = __raw_readl((unsigned long *)(fp + 4)) - 4;
                if (kernel_text_address(addr))
                        print_ip_sym(addr);
                else
                        break;
                /* stack grows up, so frame pointers must decrease */
                fpnew = __raw_readl((unsigned long *)(fp + 0));
                if (fpnew >= fp)
                        break;
                fp = fpnew;
        }
#else
        while (!kstack_end(sp)) {
                addr = (*sp--) - 4;
                if (kernel_text_address(addr))
                        print_ip_sym(addr);
        }
#endif

        printk("\n");

        debug_show_held_locks(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        if (!tsk)
                tsk = current;
        if (tsk == current)
                sp = (unsigned long *)current_stack_pointer;
        else
                sp = (unsigned long *)tsk->thread.kernel_context->AX[0].U0;

        show_trace(tsk, sp, NULL);
}