/*
 *  Derived from "arch/i386/kernel/process.c"
 *    Copyright (C) 1995  Linus Torvalds
 *
 *  Updated and modified by Cort Dougan (cort@cs.nmt.edu) and
 *  Paul Mackerras (paulus@cs.anu.edu.au)
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/prctl.h>
#include <linux/init_task.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/mqueue.h>
#include <linux/hardirq.h>
#include <linux/utsname.h>
#include <linux/ftrace.h>
#include <linux/kernel_stat.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/uaccess.h>

#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/machdep.h>
#include <asm/time.h>
#include <asm/runlatch.h>
#include <asm/syscalls.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/debug.h>
#ifdef CONFIG_PPC64
#include <asm/firmware.h>
#endif
#include <asm/code-patching.h>

#include <linux/kprobes.h>
#include <linux/kdebug.h>
/* Transactional Memory debug */
#ifdef TM_DEBUG_SW
#define TM_DEBUG(x...) printk(KERN_INFO x)
#else
#define TM_DEBUG(x...) do { } while (0)
#endif

extern unsigned long _get_SP(void);

#ifndef CONFIG_SMP
struct task_struct *last_task_used_math = NULL;
struct task_struct *last_task_used_altivec = NULL;
struct task_struct *last_task_used_vsx = NULL;
struct task_struct *last_task_used_spe = NULL;
#endif
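
/*
 * On non-SMP kernels, FP/VMX/VSX/SPE state is switched lazily: the
 * pointers above track which task last used each unit, the state stays
 * live in the CPU, and a task reclaims it by trapping on the unit.
 * discard_lazy_cpu_state() below drops the current task's claim.
 */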
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void giveup_fpu_maybe_transactional(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}

	giveup_fpu(tsk);
}

void giveup_altivec_maybe_transactional(struct task_struct *tsk)
{
	/*
	 * If we are saving the current thread's registers, and the
	 * thread is in a transactional state, set the TIF_RESTORE_TM
	 * bit so that we know to restore the registers before
	 * returning to userspace.
	 */
	if (tsk == current && tsk->thread.regs &&
	    MSR_TM_ACTIVE(tsk->thread.regs->msr) &&
	    !test_thread_flag(TIF_RESTORE_TM)) {
		tsk->thread.tm_orig_msr = tsk->thread.regs->msr;
		set_thread_flag(TIF_RESTORE_TM);
	}

	giveup_altivec(tsk);
}
#else
#define giveup_fpu_maybe_transactional(tsk)	giveup_fpu(tsk)
#define giveup_altivec_maybe_transactional(tsk)	giveup_altivec(tsk)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#ifdef CONFIG_PPC_FPU
/*
 * Make sure the floating-point register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_fp_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		/*
		 * We need to disable preemption here because if we didn't,
		 * another process could get scheduled after the regs->msr
		 * test but before we have finished saving the FP registers
		 * to the thread_struct.  That process could take over the
		 * FPU, and then when we get scheduled again we would store
		 * bogus values for the remaining FP registers.
		 */
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_FP) {
#ifdef CONFIG_SMP
			/*
			 * This should only ever be called for current or
			 * for a stopped child process.  Since we save away
			 * the FP register state on context switch on SMP,
			 * there is something wrong if a stopped child appears
			 * to still have its FP state in the CPU registers.
			 */
			BUG_ON(tsk != current);
#endif
			giveup_fpu_maybe_transactional(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_fp_to_thread);
#endif /* CONFIG_PPC_FPU */
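
/*
 * Typical caller pattern (illustrative sketch; real callers are e.g.
 * the ptrace and signal paths): flush the live FP state into
 * tsk->thread.fp_state before reading or modifying it:
 *
 *	flush_fp_to_thread(child);
 *	... access child->thread.fp_state ...
 */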
void enable_kernel_fp(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_FP))
		giveup_fpu_maybe_transactional(current);
	else
		giveup_fpu(NULL);	/* just enables FP for kernel */
#else
	giveup_fpu_maybe_transactional(last_task_used_math);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_fp);
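
/*
 * Kernel code that wants to use the FPU must claim it with preemption
 * disabled, roughly (illustrative sketch):
 *
 *	preempt_disable();
 *	enable_kernel_fp();
 *	... use FP registers ...
 *	preempt_enable();
 *
 * The WARN_ON(preemptible()) above enforces this rule.
 */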
#ifdef CONFIG_ALTIVEC
void enable_kernel_altivec(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VEC))
		giveup_altivec_maybe_transactional(current);
	else
		giveup_altivec_notask();
#else
	giveup_altivec_maybe_transactional(last_task_used_altivec);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_altivec);

/*
 * Make sure the VMX/Altivec register state in the
 * thread_struct is up to date for task tsk.
 */
void flush_altivec_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VEC) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_altivec_maybe_transactional(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_altivec_to_thread);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
#if 0
/* not currently used, but some crazy RAID module might want to later */
void enable_kernel_vsx(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_VSX))
		giveup_vsx(current);
	else
		giveup_vsx(NULL);	/* just enable vsx for kernel - force */
#else
	giveup_vsx(last_task_used_vsx);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_vsx);
#endif

void giveup_vsx(struct task_struct *tsk)
{
	giveup_fpu_maybe_transactional(tsk);
	giveup_altivec_maybe_transactional(tsk);
	__giveup_vsx(tsk);
}
EXPORT_SYMBOL(giveup_vsx);

void flush_vsx_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_VSX) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			giveup_vsx(tsk);
		}
		preempt_enable();
	}
}
EXPORT_SYMBOL_GPL(flush_vsx_to_thread);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
void enable_kernel_spe(void)
{
	WARN_ON(preemptible());

#ifdef CONFIG_SMP
	if (current->thread.regs && (current->thread.regs->msr & MSR_SPE))
		giveup_spe(current);
	else
		giveup_spe(NULL);	/* just enable SPE for kernel - force */
#else
	giveup_spe(last_task_used_spe);
#endif /* CONFIG_SMP */
}
EXPORT_SYMBOL(enable_kernel_spe);

void flush_spe_to_thread(struct task_struct *tsk)
{
	if (tsk->thread.regs) {
		preempt_disable();
		if (tsk->thread.regs->msr & MSR_SPE) {
#ifdef CONFIG_SMP
			BUG_ON(tsk != current);
#endif
			tsk->thread.spefscr = mfspr(SPRN_SPEFSCR);
			giveup_spe(tsk);
		}
		preempt_enable();
	}
}
#endif /* CONFIG_SPE */
#ifndef CONFIG_SMP
/*
 * If we are doing lazy switching of CPU state (FP, altivec or SPE),
 * and the current task has some state, discard it.
 */
void discard_lazy_cpu_state(void)
{
	preempt_disable();
	if (last_task_used_math == current)
		last_task_used_math = NULL;
#ifdef CONFIG_ALTIVEC
	if (last_task_used_altivec == current)
		last_task_used_altivec = NULL;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (last_task_used_vsx == current)
		last_task_used_vsx = NULL;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	if (last_task_used_spe == current)
		last_task_used_spe = NULL;
#endif
	preempt_enable();
}
#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
void do_send_trap(struct pt_regs *regs, unsigned long address,
		  unsigned long error_code, int signal_code, int breakpt)
{
	siginfo_t info;

	current->thread.trap_nr = signal_code;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = breakpt;	/* breakpoint or watchpoint id */
	info.si_code = signal_code;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
void do_break(struct pt_regs *regs, unsigned long address,
	      unsigned long error_code)
{
	siginfo_t info;

	current->thread.trap_nr = TRAP_HWBKPT;
	if (notify_die(DIE_DABR_MATCH, "dabr_match", regs, error_code,
			11, SIGSEGV) == NOTIFY_STOP)
		return;

	if (debugger_break_match(regs))
		return;

	/* Clear the breakpoint */
	hw_breakpoint_disable();

	/* Deliver the signal to userspace */
	info.si_signo = SIGTRAP;
	info.si_errno = 0;
	info.si_code = TRAP_HWBKPT;
	info.si_addr = (void __user *)address;
	force_sig_info(SIGTRAP, &info, current);
}
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */
static DEFINE_PER_CPU(struct arch_hw_breakpoint, current_brk);

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
/*
 * Set the debug registers back to their default "safe" values.
 */
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->debug.iac1 = thread->debug.iac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	thread->debug.iac3 = thread->debug.iac4 = 0;
#endif
	thread->debug.dac1 = thread->debug.dac2 = 0;
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	thread->debug.dvc1 = thread->debug.dvc2 = 0;
#endif
	thread->debug.dbcr0 = 0;
#ifdef CONFIG_BOOKE
	/*
	 * Force User/Supervisor bits to 0b11 (user-only MSR[PR]=1)
	 */
	thread->debug.dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US |
			      DBCR1_IAC3US | DBCR1_IAC4US;
	/*
	 * Force Data Address Compare User/Supervisor bits to be
	 * User-only (0b11 MSR[PR]=1) and set all other bits in
	 * the DBCR2 register to 0.
	 */
	thread->debug.dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#else
	thread->debug.dbcr1 = 0;
#endif
}
static void prime_debug_regs(struct debug_reg *debug)
{
	/*
	 * We could have inherited MSR_DE from userspace, since
	 * it doesn't get cleared on exception entry.  Make sure
	 * MSR_DE is clear before we enable any debug events.
	 */
	mtmsr(mfmsr() & ~MSR_DE);

	mtspr(SPRN_IAC1, debug->iac1);
	mtspr(SPRN_IAC2, debug->iac2);
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	mtspr(SPRN_IAC3, debug->iac3);
	mtspr(SPRN_IAC4, debug->iac4);
#endif
	mtspr(SPRN_DAC1, debug->dac1);
	mtspr(SPRN_DAC2, debug->dac2);
#if CONFIG_PPC_ADV_DEBUG_DVCS > 0
	mtspr(SPRN_DVC1, debug->dvc1);
	mtspr(SPRN_DVC2, debug->dvc2);
#endif
	mtspr(SPRN_DBCR0, debug->dbcr0);
	mtspr(SPRN_DBCR1, debug->dbcr1);
#ifdef CONFIG_BOOKE
	mtspr(SPRN_DBCR2, debug->dbcr2);
#endif
}

/*
 * If either the old or the new thread is making use of the debug
 * registers, load the debug registers with the values stored in the
 * new thread.
 */
void switch_booke_debug_regs(struct debug_reg *new_debug)
{
	if ((current->thread.debug.dbcr0 & DBCR0_IDM)
		|| (new_debug->dbcr0 & DBCR0_IDM))
		prime_debug_regs(new_debug);
}
EXPORT_SYMBOL_GPL(switch_booke_debug_regs);
#else	/* !CONFIG_PPC_ADV_DEBUG_REGS */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
static void set_debug_reg_defaults(struct thread_struct *thread)
{
	thread->hw_brk.address = 0;
	thread->hw_brk.type = 0;
	set_breakpoint(&thread->hw_brk);
}
#endif /* !CONFIG_HAVE_HW_BREAKPOINT */
#endif	/* CONFIG_PPC_ADV_DEBUG_REGS */

#ifdef CONFIG_PPC_ADV_DEBUG_REGS
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DAC1, dabr);
#ifdef CONFIG_PPC_47x
	isync();
#endif
	return 0;
}
#elif defined(CONFIG_PPC_BOOK3S)
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	mtspr(SPRN_DABR, dabr);
	if (cpu_has_feature(CPU_FTR_DABRX))
		mtspr(SPRN_DABRX, dabrx);
	return 0;
}
#else
static inline int __set_dabr(unsigned long dabr, unsigned long dabrx)
{
	return -EINVAL;
}
#endif
static inline int set_dabr(struct arch_hw_breakpoint *brk)
{
	unsigned long dabr, dabrx;

	dabr = brk->address | (brk->type & HW_BRK_TYPE_DABR);
	dabrx = ((brk->type >> 3) & 0x7);

	if (ppc_md.set_dabr)
		return ppc_md.set_dabr(dabr, dabrx);

	return __set_dabr(dabr, dabrx);
}

static inline int set_dawr(struct arch_hw_breakpoint *brk)
{
	unsigned long dawr, dawrx, mrd;

	dawr = brk->address;

	dawrx  = (brk->type & (HW_BRK_TYPE_READ | HW_BRK_TYPE_WRITE)) \
			   << (63 - 58); /* read/write bits */
	dawrx |= ((brk->type & (HW_BRK_TYPE_TRANSLATE)) >> 2) \
			   << (63 - 59); /* translate */
	dawrx |= (brk->type & (HW_BRK_TYPE_PRIV_ALL)) \
			   >> 3; /* PRIVM bits */

	/*
	 * The DAWR length is stored in field MRD, bits 48:53.  It matches
	 * a range in doublewords (64 bits), biased by -1, e.g. 0b000000 = 1DW
	 * and 0b111111 = 64DW.  brk->len is in bytes.  This aligns up to
	 * double word size, shifts and applies the bias.
	 */
	mrd = ((brk->len + 7) >> 3) - 1;
	dawrx |= (mrd & 0x3f) << (63 - 53);

	if (ppc_md.set_dawr)
		return ppc_md.set_dawr(dawr, dawrx);
	mtspr(SPRN_DAWR, dawr);
	mtspr(SPRN_DAWRX, dawrx);
	return 0;
}
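
/*
 * Worked example of the MRD encoding above: brk->len = 16 bytes gives
 * mrd = ((16 + 7) >> 3) - 1 = 1, i.e. a match range of 2 doublewords,
 * while any len of 1..8 bytes gives mrd = 0 (1 doubleword).
 */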
void __set_breakpoint(struct arch_hw_breakpoint *brk)
{
	memcpy(this_cpu_ptr(&current_brk), brk, sizeof(*brk));

	if (cpu_has_feature(CPU_FTR_DAWR))
		set_dawr(brk);
	else
		set_dabr(brk);
}

void set_breakpoint(struct arch_hw_breakpoint *brk)
{
	preempt_disable();
	__set_breakpoint(brk);
	preempt_enable();
}

#ifdef CONFIG_PPC64
DEFINE_PER_CPU(struct cpu_usage, cpu_usage_array);
#endif

static inline bool hw_brk_match(struct arch_hw_breakpoint *a,
				struct arch_hw_breakpoint *b)
{
	if (a->address != b->address)
		return false;
	if (a->type != b->type)
		return false;
	if (a->len != b->len)
		return false;
	return true;
}
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static void tm_reclaim_thread(struct thread_struct *thr,
			      struct thread_info *ti, uint8_t cause)
{
	unsigned long msr_diff = 0;

	/*
	 * If FP/VSX registers have been already saved to the
	 * thread_struct, move them to the transact_fp array.
	 * We clear the TIF_RESTORE_TM bit since after the reclaim
	 * the thread will no longer be transactional.
	 */
	if (test_ti_thread_flag(ti, TIF_RESTORE_TM)) {
		msr_diff = thr->tm_orig_msr & ~thr->regs->msr;
		if (msr_diff & MSR_FP)
			memcpy(&thr->transact_fp, &thr->fp_state,
			       sizeof(struct thread_fp_state));
		if (msr_diff & MSR_VEC)
			memcpy(&thr->transact_vr, &thr->vr_state,
			       sizeof(struct thread_vr_state));
		clear_ti_thread_flag(ti, TIF_RESTORE_TM);
		msr_diff &= MSR_FP | MSR_VEC | MSR_VSX | MSR_FE0 | MSR_FE1;
	}

	tm_reclaim(thr, thr->regs->msr, cause);

	/*
	 * Having done the reclaim, we now have the checkpointed
	 * FP/VSX values in the registers.  These might be valid
	 * even if we have previously called enable_kernel_fp() or
	 * flush_fp_to_thread(), so update thr->regs->msr to
	 * indicate their current validity.
	 */
	thr->regs->msr |= msr_diff;
}

void tm_reclaim_current(uint8_t cause)
{
	tm_enable();
	tm_reclaim_thread(&current->thread, current_thread_info(), cause);
}
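
/*
 * tm_reclaim_current() pulls the checkpointed state out of the CPU for
 * the current task (e.g. on signal delivery).  A reclaim must later be
 * paired with a recheckpoint (see tm_recheckpoint() below) before the
 * thread can resume its transaction.
 */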
static inline void tm_reclaim_task(struct task_struct *tsk)
{
	/*
	 * We have to work out if we're switching from/to a task that's in the
	 * middle of a transaction.
	 *
	 * In switching we need to maintain a 2nd register state as
	 * oldtask->thread.ckpt_regs.  We tm_reclaim(oldproc); this saves the
	 * checkpointed (tbegin) state in ckpt_regs and saves the transactional
	 * (current) FPRs into oldtask->thread.transact_fpr[].
	 *
	 * We also context switch (save) TFHAR/TEXASR/TFIAR in here.
	 */
	struct thread_struct *thr = &tsk->thread;

	if (!thr->regs)
		return;

	if (!MSR_TM_ACTIVE(thr->regs->msr))
		goto out_and_saveregs;

	/*
	 * Stash the original thread MSR, as giveup_fpu et al will
	 * modify it.  We hold onto it to see whether the task used
	 * FP & vector regs.  If the TIF_RESTORE_TM flag is set,
	 * tm_orig_msr is already set.
	 */
	if (!test_ti_thread_flag(task_thread_info(tsk), TIF_RESTORE_TM))
		thr->tm_orig_msr = thr->regs->msr;

	TM_DEBUG("--- tm_reclaim on pid %d (NIP=%lx, "
		 "ccr=%lx, msr=%lx, trap=%lx)\n",
		 tsk->pid, thr->regs->nip,
		 thr->regs->ccr, thr->regs->msr,
		 thr->regs->trap);

	tm_reclaim_thread(thr, task_thread_info(tsk), TM_CAUSE_RESCHED);

	TM_DEBUG("--- tm_reclaim on pid %d complete\n",
		 tsk->pid);

out_and_saveregs:
	/*
	 * Always save the regs here, even if a transaction's not active.
	 * This context-switches a thread's TM info SPRs.  We do it here to
	 * be consistent with the restore path (in recheckpoint) which
	 * cannot happen later in _switch().
	 */
	tm_save_sprs(thr);
}
extern void __tm_recheckpoint(struct thread_struct *thread,
			      unsigned long orig_msr);

void tm_recheckpoint(struct thread_struct *thread,
		     unsigned long orig_msr)
{
	unsigned long flags;

	/*
	 * We really can't be interrupted here, as the TEXASR registers can't
	 * change and later in the trecheckpoint code we have a userspace R1.
	 * So let's hard disable over this region.
	 */
	local_irq_save(flags);
	hard_irq_disable();

	/*
	 * The TM SPRs are restored here, so that TEXASR.FS can be set
	 * before the trecheckpoint and no explosion occurs.
	 */
	tm_restore_sprs(thread);

	__tm_recheckpoint(thread, orig_msr);

	local_irq_restore(flags);
}
static inline void tm_recheckpoint_new_task(struct task_struct *new)
{
	unsigned long msr;

	if (!cpu_has_feature(CPU_FTR_TM))
		return;

	/*
	 * Recheckpoint the registers of the thread we're about to switch to.
	 *
	 * If the task was using FP, we non-lazily reload both the original and
	 * the speculative FP register states.  This is because the kernel
	 * doesn't see if/when a TM rollback occurs, so if we take an FP
	 * unavailable exception later, we are unable to determine which set
	 * of FP regs needs to be restored.
	 */
	if (!new->thread.regs)
		return;

	if (!MSR_TM_ACTIVE(new->thread.regs->msr)) {
		tm_restore_sprs(&new->thread);
		return;
	}
	msr = new->thread.tm_orig_msr;
	/* Recheckpoint to restore original checkpointed register state. */
	TM_DEBUG("*** tm_recheckpoint of pid %d "
		 "(new->msr 0x%lx, new->origmsr 0x%lx)\n",
		 new->pid, new->thread.regs->msr, msr);

	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&new->thread, msr);

	/* This loads the speculative FP/VEC state, if used */
	if (msr & MSR_FP) {
		do_load_up_transact_fpu(&new->thread);
		new->thread.regs->msr |=
			(MSR_FP | new->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		do_load_up_transact_altivec(&new->thread);
		new->thread.regs->msr |= MSR_VEC;
	}
#endif
	/* We may as well turn on VSX too since all the state is restored now */
	if (msr & MSR_VSX)
		new->thread.regs->msr |= MSR_VSX;

	TM_DEBUG("*** tm_recheckpoint of pid %d complete "
		 "(kernel msr 0x%lx)\n",
		 new->pid, mfmsr());
}
static inline void __switch_to_tm(struct task_struct *prev)
{
	if (cpu_has_feature(CPU_FTR_TM)) {
		tm_enable();
		tm_reclaim_task(prev);
	}
}

/*
 * This is called if we are on the way out to userspace and the
 * TIF_RESTORE_TM flag is set.  It checks if we need to reload
 * FP and/or vector state and does so if necessary.
 * If userspace is inside a transaction (whether active or
 * suspended) and FP/VMX/VSX instructions have ever been enabled
 * inside that transaction, then we have to keep them enabled
 * and keep the FP/VMX/VSX state loaded while ever the transaction
 * continues.  The reason is that if we didn't, and subsequently
 * got a FP/VMX/VSX unavailable interrupt inside a transaction,
 * we don't know whether it's the same transaction, and thus we
 * don't know which of the checkpointed state and the transactional
 * state to use.
 */
void restore_tm_state(struct pt_regs *regs)
{
	unsigned long msr_diff;

	clear_thread_flag(TIF_RESTORE_TM);
	if (!MSR_TM_ACTIVE(regs->msr))
		return;

	msr_diff = current->thread.tm_orig_msr & ~regs->msr;
	msr_diff &= MSR_FP | MSR_VEC | MSR_VSX;
	if (msr_diff & MSR_FP) {
		fp_enable();
		load_fp_state(&current->thread.fp_state);
		regs->msr |= current->thread.fpexc_mode;
	}
	if (msr_diff & MSR_VEC) {
		vec_enable();
		load_vr_state(&current->thread.vr_state);
	}
	regs->msr |= msr_diff;
}

#else
#define tm_recheckpoint_new_task(new)
#define __switch_to_tm(prev)
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
struct task_struct *__switch_to(struct task_struct *prev,
	struct task_struct *new)
{
	struct thread_struct *new_thread, *old_thread;
	struct task_struct *last;
#ifdef CONFIG_PPC_BOOK3S_64
	struct ppc64_tlb_batch *batch;
#endif

	WARN_ON(!irqs_disabled());

	/*
	 * Back up the TAR and DSCR across context switches.
	 * Note that the TAR is not available for use in the kernel.  (To
	 * provide this, the TAR should be backed up/restored on exception
	 * entry/exit instead, and be in pt_regs.  FIXME, this should be in
	 * pt_regs anyway (for debug).)
	 * Save the TAR and DSCR here before we do treclaim/trecheckpoint as
	 * these will change them.
	 */
	save_early_sprs(&prev->thread);

	__switch_to_tm(prev);

#ifdef CONFIG_SMP
	/*
	 * Avoid the complexity of lazy save/restore of the fpu
	 * by just saving it every time we switch out if
	 * this task used the fpu during the last quantum.
	 *
	 * If it tries to use the fpu again, it'll trap and
	 * reload its fp regs.  So we don't have to do a restore
	 * every switch, just a save.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_FP))
		giveup_fpu(prev);
#ifdef CONFIG_ALTIVEC
	/*
	 * If the previous thread used altivec in the last quantum
	 * (thus changing altivec regs) then save them.
	 * We used to check the VRSAVE register but not all apps
	 * set it, so we don't rely on it now (and in fact we need
	 * to save & restore VSCR even if VRSAVE == 0).  -- paulus
	 *
	 * On SMP we always save/restore altivec regs just to avoid the
	 * complexity of changing processors.
	 *  -- Cort
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VEC))
		giveup_altivec(prev);
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_VSX))
		/* VMX and FPU registers are already saved here */
		__giveup_vsx(prev);
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * If the previous thread used spe in the last quantum
	 * (thus changing spe regs) then save them.
	 *
	 * On SMP we always save/restore spe regs just to avoid the
	 * complexity of changing processors.
	 */
	if (prev->thread.regs && (prev->thread.regs->msr & MSR_SPE))
		giveup_spe(prev);
#endif /* CONFIG_SPE */

#else  /* CONFIG_SMP */
#ifdef CONFIG_ALTIVEC
	/*
	 * Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_altivec -- Cort
	 */
	if (new->thread.regs && last_task_used_altivec == new)
		new->thread.regs->msr |= MSR_VEC;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_VSX
	if (new->thread.regs && last_task_used_vsx == new)
		new->thread.regs->msr |= MSR_VSX;
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/*
	 * Avoid the trap.  On smp this never happens since
	 * we don't set last_task_used_spe
	 */
	if (new->thread.regs && last_task_used_spe == new)
		new->thread.regs->msr |= MSR_SPE;
#endif /* CONFIG_SPE */

#endif /* CONFIG_SMP */
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	switch_booke_debug_regs(&new->thread.debug);
#else
	/*
	 * For PPC_BOOK3S_64, we use the hw-breakpoint interfaces that would
	 * schedule DABR
	 */
#ifndef CONFIG_HAVE_HW_BREAKPOINT
	if (unlikely(!hw_brk_match(this_cpu_ptr(&current_brk), &new->thread.hw_brk)))
		__set_breakpoint(&new->thread.hw_brk);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
#endif

	new_thread = &new->thread;
	old_thread = &current->thread;

#ifdef CONFIG_PPC64
	/*
	 * Collect processor utilization data per process
	 */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		struct cpu_usage *cu = this_cpu_ptr(&cpu_usage_array);
		unsigned long start_tb, current_tb;
		start_tb = old_thread->start_tb;
		cu->current_tb = current_tb = mfspr(SPRN_PURR);
		old_thread->accum_tb += (current_tb - start_tb);
		new_thread->start_tb = current_tb;
	}
#endif /* CONFIG_PPC64 */

#ifdef CONFIG_PPC_BOOK3S_64
	batch = this_cpu_ptr(&ppc64_tlb_batch);
	if (batch->active) {
		current_thread_info()->local_flags |= _TLF_LAZY_MMU;
		if (batch->index)
			__flush_tlb_pending(batch);
		batch->active = 0;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	/*
	 * We can't take a PMU exception inside _switch() since there is a
	 * window where the kernel stack SLB and the kernel stack are out
	 * of sync.  Hard disable here.
	 */
	hard_irq_disable();

	tm_recheckpoint_new_task(new);

	last = _switch(old_thread, new_thread);

#ifdef CONFIG_PPC_BOOK3S_64
	if (current_thread_info()->local_flags & _TLF_LAZY_MMU) {
		current_thread_info()->local_flags &= ~_TLF_LAZY_MMU;
		batch = this_cpu_ptr(&ppc64_tlb_batch);
		batch->active = 1;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

	return last;
}
static int instructions_to_print = 16;

static void show_instructions(struct pt_regs *regs)
{
	int i;
	unsigned long pc = regs->nip - (instructions_to_print * 3 / 4 *
			sizeof(int));

	printk("Instruction dump:");

	for (i = 0; i < instructions_to_print; i++) {
		int instr;

		if (!(i % 8))
			printk("\n");

#if !defined(CONFIG_BOOKE)
		/*
		 * If executing with the IMMU off, adjust pc rather
		 * than print XXXXXXXX.
		 */
		if (!(regs->msr & MSR_IR))
			pc = (unsigned long)phys_to_virt(pc);
#endif

		if (!__kernel_text_address(pc) ||
		     probe_kernel_address((unsigned int __user *)pc, instr)) {
			printk(KERN_CONT "XXXXXXXX ");
		} else {
			if (regs->nip == pc)
				printk(KERN_CONT "<%08x> ", instr);
			else
				printk(KERN_CONT "%08x ", instr);
		}

		pc += sizeof(int);
	}

	printk("\n");
}
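
/*
 * With instructions_to_print = 16, the dump starts at
 * nip - 16*3/4 * 4 bytes = nip - 48, i.e. twelve instructions before
 * the faulting one (which is marked <...>) and three after it.
 */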
static struct regbit {
	unsigned long bit;
	const char *name;
} msr_bits[] = {
#if defined(CONFIG_PPC64) && !defined(CONFIG_BOOKE)
	{MSR_SF,	"SF"},
	{MSR_HV,	"HV"},
#endif
	{MSR_VEC,	"VEC"},
	{MSR_VSX,	"VSX"},
#ifdef CONFIG_BOOKE
	{MSR_CE,	"CE"},
#endif
	{MSR_EE,	"EE"},
	{MSR_PR,	"PR"},
	{MSR_FP,	"FP"},
	{MSR_ME,	"ME"},
#ifdef CONFIG_BOOKE
	{MSR_DE,	"DE"},
#else
	{MSR_SE,	"SE"},
	{MSR_BE,	"BE"},
#endif
	{MSR_IR,	"IR"},
	{MSR_DR,	"DR"},
	{MSR_PMM,	"PMM"},
#ifndef CONFIG_BOOKE
	{MSR_RI,	"RI"},
	{MSR_LE,	"LE"},
#endif
	{0,		NULL}
};

static void printbits(unsigned long val, struct regbit *bits)
{
	const char *sep = "";

	printk("<");
	for (; bits->bit; ++bits)
		if (val & bits->bit) {
			printk("%s%s", sep, bits->name);
			sep = ",";
		}
	printk(">");
}
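
/*
 * Example: an MSR with EE, PR, FP, ME, IR and DR set is printed by
 * printbits() as "<EE,PR,FP,ME,IR,DR>", following the order of
 * msr_bits[] above.
 */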
#ifdef CONFIG_PPC64
#define REG		"%016lx"
#define REGS_PER_LINE	4
#define LAST_VOLATILE	13
#else
#define REG		"%08lx"
#define REGS_PER_LINE	8
#define LAST_VOLATILE	12
#endif

void show_regs(struct pt_regs *regs)
{
	int i, trap;

	show_regs_print_info(KERN_DEFAULT);

	printk("NIP: "REG" LR: "REG" CTR: "REG"\n",
	       regs->nip, regs->link, regs->ctr);
	printk("REGS: %p TRAP: %04lx   %s  (%s)\n",
	       regs, regs->trap, print_tainted(), init_utsname()->release);
	printk("MSR: "REG" ", regs->msr);
	printbits(regs->msr, msr_bits);
	printk("  CR: %08lx  XER: %08lx\n", regs->ccr, regs->xer);
	trap = TRAP(regs);
	if ((regs->trap != 0xc00) && cpu_has_feature(CPU_FTR_CFAR))
		printk("CFAR: "REG" ", regs->orig_gpr3);
	if (trap == 0x200 || trap == 0x300 || trap == 0x600)
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
		printk("DEAR: "REG" ESR: "REG" ", regs->dar, regs->dsisr);
#else
		printk("DAR: "REG" DSISR: %08lx ", regs->dar, regs->dsisr);
#endif
#ifdef CONFIG_PPC64
	printk("SOFTE: %ld ", regs->softe);
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (MSR_TM_ACTIVE(regs->msr))
		printk("\nPACATMSCRATCH: %016llx ", get_paca()->tm_scratch);
#endif

	for (i = 0; i < 32; i++) {
		if ((i % REGS_PER_LINE) == 0)
			printk("\nGPR%02d: ", i);
		printk(REG " ", regs->gpr[i]);
		if (i == LAST_VOLATILE && !FULL_REGS(regs))
			break;
	}
	printk("\n");
#ifdef CONFIG_KALLSYMS
	/*
	 * Lookup NIP late so we have the best chance of getting the
	 * above info out without failing
	 */
	printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
	printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
#endif
	show_stack(current, (unsigned long *) regs->gpr[1]);
	if (!user_mode(regs))
		show_instructions(regs);
}
void exit_thread(void)
{
	discard_lazy_cpu_state();
}

void flush_thread(void)
{
	discard_lazy_cpu_state();

#ifdef CONFIG_HAVE_HW_BREAKPOINT
	flush_ptrace_hw_breakpoint(current);
#else /* CONFIG_HAVE_HW_BREAKPOINT */
	set_debug_reg_defaults(&current->thread);
#endif /* CONFIG_HAVE_HW_BREAKPOINT */
}

void
release_thread(struct task_struct *t)
{
}

/*
 * this gets called so that we can store coprocessor state into memory and
 * copy the current task into the new thread.
 */
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	flush_fp_to_thread(src);
	flush_altivec_to_thread(src);
	flush_vsx_to_thread(src);
	flush_spe_to_thread(src);
	/*
	 * Flush TM state out so we can copy it.  __switch_to_tm() does this
	 * flush but it removes the checkpointed state from the current CPU and
	 * transitions the CPU out of TM mode.  Hence we need to call
	 * tm_recheckpoint_new_task() (on the same task) to restore the
	 * checkpointed state back and the TM mode.
	 */
	__switch_to_tm(src);
	tm_recheckpoint_new_task(src);

	*dst = *src;

	clear_task_ebb(dst);

	return 0;
}
static void setup_ksp_vsid(struct task_struct *p, unsigned long sp)
{
#ifdef CONFIG_PPC_STD_MMU_64
	unsigned long sp_vsid;
	unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;

	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_1T)
			<< SLB_VSID_SHIFT_1T;
	else
		sp_vsid = get_kernel_vsid(sp, MMU_SEGSIZE_256M)
			<< SLB_VSID_SHIFT;
	sp_vsid |= SLB_VSID_KERNEL | llp;
	p->thread.ksp_vsid = sp_vsid;
#endif
}

/*
 * Copy a thread: set up the architecture-specific thread state
 */
int copy_thread(unsigned long clone_flags, unsigned long usp,
		unsigned long kthread_arg, struct task_struct *p)
{
	struct pt_regs *childregs, *kregs;
	extern void ret_from_fork(void);
	extern void ret_from_kernel_thread(void);
	void (*f)(void);
	unsigned long sp = (unsigned long)task_stack_page(p) + THREAD_SIZE;

	/* Copy registers */
	sp -= sizeof(struct pt_regs);
	childregs = (struct pt_regs *) sp;
	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		struct thread_info *ti = (void *)task_stack_page(p);
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->gpr[1] = sp + sizeof(struct pt_regs);
		/* function */
		if (usp)
			childregs->gpr[14] = ppc_function_entry((void *)usp);
#ifdef CONFIG_PPC64
		clear_tsk_thread_flag(p, TIF_32BIT);
		childregs->softe = 1;
#endif
		childregs->gpr[15] = kthread_arg;
		p->thread.regs = NULL;	/* no user register state */
		ti->flags |= _TIF_RESTOREALL;
		f = ret_from_kernel_thread;
	} else {
		/* user thread */
		struct pt_regs *regs = current_pt_regs();
		CHECK_FULL_REGS(regs);
		*childregs = *regs;
		if (usp)
			childregs->gpr[1] = usp;
		p->thread.regs = childregs;
		childregs->gpr[3] = 0;	/* Result from fork() */
		if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_PPC64
			if (!is_32bit_task())
				childregs->gpr[13] = childregs->gpr[6];
			else
#endif
				childregs->gpr[2] = childregs->gpr[6];
		}

		f = ret_from_fork;
	}
	sp -= STACK_FRAME_OVERHEAD;

	/*
	 * The way this works is that at some point in the future
	 * some task will call _switch to switch to the new task.
	 * That will pop off the stack frame created below and start
	 * the new task running at ret_from_fork.  The new task will
	 * do some house keeping and then return from the fork or clone
	 * system call, using the stack frame created above.
	 */
	((unsigned long *)sp)[0] = 0;
	sp -= sizeof(struct pt_regs);
	kregs = (struct pt_regs *) sp;
	sp -= STACK_FRAME_OVERHEAD;
	p->thread.ksp = sp;
#ifdef CONFIG_PPC32
	p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
				_ALIGN_UP(sizeof(struct thread_info), 16);
#endif
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	p->thread.ptrace_bps[0] = NULL;
#endif

	p->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	p->thread.vr_save_area = NULL;
#endif

	setup_ksp_vsid(p, sp);

#ifdef CONFIG_PPC64
	if (cpu_has_feature(CPU_FTR_DSCR)) {
		p->thread.dscr_inherit = current->thread.dscr_inherit;
		p->thread.dscr = current->thread.dscr;
	}
	if (cpu_has_feature(CPU_FTR_HAS_PPR))
		p->thread.ppr = INIT_PPR;
#endif
	kregs->nip = ppc_function_entry(f);
	return 0;
}
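
/*
 * Rough sketch of the child stack built by copy_thread() above, from
 * the top of the stack page downwards:
 *
 *	[ struct pt_regs ]	childregs: user regs (or a zeroed frame
 *				with gpr[14]/gpr[15] = fn/arg for a
 *				kernel thread)
 *	[ dummy frame    ]	back chain set to NULL
 *	[ struct pt_regs ]	kregs: nip = ret_from_fork or
 *				ret_from_kernel_thread
 *	[ switch frame   ]	popped by _switch(); p->thread.ksp
 *				points here
 */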
/*
 * Set up a thread for executing a new program
 */
void start_thread(struct pt_regs *regs, unsigned long start, unsigned long sp)
{
#ifdef CONFIG_PPC64
	unsigned long load_addr = regs->gpr[2];	/* saved by ELF_PLAT_INIT */
#endif

	/*
	 * If we exec out of a kernel thread then thread.regs will not be
	 * set.  Do it now.
	 */
	if (!current->thread.regs) {
		struct pt_regs *regs = task_stack_page(current) + THREAD_SIZE;
		current->thread.regs = regs - 1;
	}

	memset(regs->gpr, 0, sizeof(regs->gpr));
	regs->ctr = 0;
	regs->link = 0;
	regs->xer = 0;
	regs->ccr = 0;
	regs->gpr[1] = sp;

	/*
	 * We have just cleared all the nonvolatile GPRs, so make
	 * FULL_REGS(regs) return true.  This is necessary to allow
	 * ptrace to examine the thread immediately after exec.
	 */
	regs->trap &= ~1UL;

#ifdef CONFIG_PPC32
	regs->mq = 0;
	regs->nip = start;
	regs->msr = MSR_USER;
#else
	if (!is_32bit_task()) {
		unsigned long entry;

		if (is_elf2_task()) {
			/* Look ma, no function descriptors! */
			entry = start;

			/*
			 * Ulrich says:
			 *   The latest iteration of the ABI requires that when
			 *   calling a function (at its global entry point),
			 *   the caller must ensure r12 holds the entry point
			 *   address (so that the function can quickly
			 *   establish addressability).
			 */
			regs->gpr[12] = start;
			/* Make sure that's restored on entry to userspace. */
			set_thread_flag(TIF_RESTOREALL);
		} else {
			unsigned long toc;

			/*
			 * start is a relocated pointer to the function
			 * descriptor for the elf _start routine.  The first
			 * entry in the function descriptor is the entry
			 * address of _start and the second entry is the TOC
			 * value we need to use.
			 */
			__get_user(entry, (unsigned long __user *)start);
			__get_user(toc, (unsigned long __user *)start+1);

			/*
			 * Check whether the e_entry function descriptor entries
			 * need to be relocated before we can use them.
			 */
			if (load_addr != 0) {
				entry += load_addr;
				toc   += load_addr;
			}
			regs->gpr[2] = toc;
		}
		regs->nip = entry;
		regs->msr = MSR_USER64;
	} else {
		regs->nip = start;
		regs->gpr[2] = 0;
		regs->msr = MSR_USER32;
	}
#endif
	discard_lazy_cpu_state();
#ifdef CONFIG_VSX
	current->thread.used_vsr = 0;
#endif
	memset(&current->thread.fp_state, 0, sizeof(current->thread.fp_state));
	current->thread.fp_save_area = NULL;
#ifdef CONFIG_ALTIVEC
	memset(&current->thread.vr_state, 0, sizeof(current->thread.vr_state));
	current->thread.vr_state.vscr.u[3] = 0x00010000; /* Java mode disabled */
	current->thread.vr_save_area = NULL;
	current->thread.vrsave = 0;
	current->thread.used_vr = 0;
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
	memset(current->thread.evr, 0, sizeof(current->thread.evr));
	current->thread.acc = 0;
	current->thread.spefscr = 0;
	current->thread.used_spe = 0;
#endif /* CONFIG_SPE */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM))
		regs->msr |= MSR_TM;
	current->thread.tm_tfhar = 0;
	current->thread.tm_texasr = 0;
	current->thread.tm_tfiar = 0;
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
}
EXPORT_SYMBOL(start_thread);
#define PR_FP_ALL_EXCEPT (PR_FP_EXC_DIV | PR_FP_EXC_OVF | PR_FP_EXC_UND \
		| PR_FP_EXC_RES | PR_FP_EXC_INV)

int set_fpexc_mode(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	/*
	 * This is a bit hairy.  If we are an SPE enabled processor
	 * (have embedded fp) we store the IEEE exception enable flags in
	 * fpexc_mode.  fpexc_mode is also used for setting FP exception
	 * mode (async, precise, disabled) for 'Classic' FP.
	 */
	if (val & PR_FP_EXC_SW_ENABLE) {
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			tsk->thread.fpexc_mode = val &
				(PR_FP_EXC_SW_ENABLE | PR_FP_ALL_EXCEPT);
			return 0;
		} else {
			return -EINVAL;
		}
#else
		return -EINVAL;
#endif
	}

	/*
	 * On a CONFIG_SPE this does not hurt us.  The bits that
	 * __pack_fe01 use do not overlap with bits used for
	 * PR_FP_EXC_SW_ENABLE.  Additionally, the MSR[FE0,FE1] bits
	 * on CONFIG_SPE implementations are reserved so writing to
	 * them does not change anything.
	 */
	if (val > PR_FP_EXC_PRECISE)
		return -EINVAL;
	tsk->thread.fpexc_mode = __pack_fe01(val);
	if (regs != NULL && (regs->msr & MSR_FP) != 0)
		regs->msr = (regs->msr & ~(MSR_FE0|MSR_FE1))
			| tsk->thread.fpexc_mode;
	return 0;
}
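
/*
 * Userspace reaches set_fpexc_mode() via prctl(2), e.g. (illustrative):
 *
 *	prctl(PR_SET_FPEXC, PR_FP_EXC_PRECISE);
 *
 * which switches a 'Classic' FP task to precise exception mode via
 * MSR[FE0,FE1].
 */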
int get_fpexc_mode(struct task_struct *tsk, unsigned long adr)
{
	unsigned int val;

	if (tsk->thread.fpexc_mode & PR_FP_EXC_SW_ENABLE)
#ifdef CONFIG_SPE
		if (cpu_has_feature(CPU_FTR_SPE)) {
			/*
			 * When the sticky exception bits are set
			 * directly by userspace, it must call prctl
			 * with PR_GET_FPEXC (with PR_FP_EXC_SW_ENABLE
			 * in the existing prctl settings) or
			 * PR_SET_FPEXC (with PR_FP_EXC_SW_ENABLE in
			 * the bits being set).  <fenv.h> functions
			 * saving and restoring the whole
			 * floating-point environment need to do so
			 * anyway to restore the prctl settings from
			 * the saved environment.
			 */
			tsk->thread.spefscr_last = mfspr(SPRN_SPEFSCR);
			val = tsk->thread.fpexc_mode;
		} else
			return -EINVAL;
#else
		return -EINVAL;
#endif
	else
		val = __unpack_fe01(tsk->thread.fpexc_mode);
	return put_user(val, (unsigned int __user *) adr);
}
int set_endian(struct task_struct *tsk, unsigned int val)
{
	struct pt_regs *regs = tsk->thread.regs;

	if ((val == PR_ENDIAN_LITTLE && !cpu_has_feature(CPU_FTR_REAL_LE)) ||
	    (val == PR_ENDIAN_PPC_LITTLE && !cpu_has_feature(CPU_FTR_PPC_LE)))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (val == PR_ENDIAN_BIG)
		regs->msr &= ~MSR_LE;
	else if (val == PR_ENDIAN_LITTLE || val == PR_ENDIAN_PPC_LITTLE)
		regs->msr |= MSR_LE;
	else
		return -EINVAL;

	return 0;
}

int get_endian(struct task_struct *tsk, unsigned long adr)
{
	struct pt_regs *regs = tsk->thread.regs;
	unsigned int val;

	if (!cpu_has_feature(CPU_FTR_PPC_LE) &&
	    !cpu_has_feature(CPU_FTR_REAL_LE))
		return -EINVAL;

	if (regs == NULL)
		return -EINVAL;

	if (regs->msr & MSR_LE) {
		if (cpu_has_feature(CPU_FTR_REAL_LE))
			val = PR_ENDIAN_LITTLE;
		else
			val = PR_ENDIAN_PPC_LITTLE;
	} else
		val = PR_ENDIAN_BIG;

	return put_user(val, (unsigned int __user *)adr);
}

int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
{
	tsk->thread.align_ctl = val;
	return 0;
}

int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
{
	return put_user(tsk->thread.align_ctl, (unsigned int __user *)adr);
}
static inline int valid_irq_stack(unsigned long sp, struct task_struct *p,
				  unsigned long nbytes)
{
	unsigned long stack_page;
	unsigned long cpu = task_cpu(p);

	/*
	 * Avoid crashing if the stack has overflowed and corrupted
	 * task_cpu(p), which is in the thread_info struct.
	 */
	if (cpu < NR_CPUS && cpu_possible(cpu)) {
		stack_page = (unsigned long) hardirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;

		stack_page = (unsigned long) softirq_ctx[cpu];
		if (sp >= stack_page + sizeof(struct thread_struct)
		    && sp <= stack_page + THREAD_SIZE - nbytes)
			return 1;
	}
	return 0;
}

int validate_sp(unsigned long sp, struct task_struct *p,
		unsigned long nbytes)
{
	unsigned long stack_page = (unsigned long)task_stack_page(p);

	if (sp >= stack_page + sizeof(struct thread_struct)
	    && sp <= stack_page + THREAD_SIZE - nbytes)
		return 1;

	return valid_irq_stack(sp, p, nbytes);
}
EXPORT_SYMBOL(validate_sp);
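
/*
 * validate_sp() is the guard used by the stack walkers below
 * (get_wchan() and show_stack()): a saved stack pointer is only
 * dereferenced once it has been checked against the task's stack page
 * or one of the per-cpu IRQ stacks.
 */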
unsigned long get_wchan(struct task_struct *p)
{
	unsigned long ip, sp;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	sp = p->thread.ksp;
	if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
		return 0;

	do {
		sp = *(unsigned long *)sp;
		if (!validate_sp(sp, p, STACK_FRAME_OVERHEAD))
			return 0;
		if (count > 0) {
			ip = ((unsigned long *)sp)[STACK_FRAME_LR_SAVE];
			if (!in_sched_functions(ip))
				return ip;
		}
	} while (count++ < 16);
	return 0;
}
static int kstack_depth_to_print = CONFIG_PRINT_STACK_DEPTH;

void show_stack(struct task_struct *tsk, unsigned long *stack)
{
	unsigned long sp, ip, lr, newsp;
	int count = 0;
	int firstframe = 1;
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	int curr_frame = current->curr_ret_stack;
	extern void return_to_handler(void);
	unsigned long rth = (unsigned long)return_to_handler;
#endif

	sp = (unsigned long) stack;
	if (tsk == NULL)
		tsk = current;
	if (sp == 0) {
		if (tsk == current)
			sp = current_stack_pointer();
		else
			sp = tsk->thread.ksp;
	}

	lr = 0;
	printk("Call Trace:\n");
	do {
		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		stack = (unsigned long *) sp;
		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!firstframe || ip != lr) {
			printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
			if ((ip == rth) && curr_frame >= 0) {
				printk(" (%pS)",
				       (void *)current->ret_stack[curr_frame].ret);
				curr_frame--;
			}
#endif
			if (firstframe)
				printk(" (unreliable)");
			printk("\n");
		}
		firstframe = 0;

		/*
		 * See if this is an exception frame.
		 * We look for the "regshere" marker in the current frame.
		 */
		if (validate_sp(sp, tsk, STACK_INT_FRAME_SIZE)
		    && stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			struct pt_regs *regs = (struct pt_regs *)
				(sp + STACK_FRAME_OVERHEAD);
			lr = regs->link;
			printk("--- interrupt: %lx at %pS\n    LR = %pS\n",
			       regs->trap, (void *)regs->nip, (void *)lr);
			firstframe = 1;
		}

		sp = newsp;
	} while (count++ < kstack_depth_to_print);
}
#ifdef CONFIG_PPC64
/* Called with hard IRQs off */
void notrace __ppc64_runlatch_on(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl |= CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);

	ti->local_flags |= _TLF_RUNLATCH;
}

/* Called with hard IRQs off */
void notrace __ppc64_runlatch_off(void)
{
	struct thread_info *ti = current_thread_info();
	unsigned long ctrl;

	ti->local_flags &= ~_TLF_RUNLATCH;

	ctrl = mfspr(SPRN_CTRLF);
	ctrl &= ~CTRL_RUNLATCH;
	mtspr(SPRN_CTRLT, ctrl);
}
#endif /* CONFIG_PPC64 */
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

static inline unsigned long brk_rnd(void)
{
	unsigned long rnd = 0;

	/* 8MB for 32bit, 1GB for 64bit */
	if (is_32bit_task())
		rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
	else
		rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));

	return rnd << PAGE_SHIFT;
}
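
/*
 * Worked example, assuming 4K pages (PAGE_SHIFT = 12): a 32-bit task
 * gets rnd in [0, 2^11) pages, i.e. up to 8MB of randomisation, and a
 * 64-bit task gets [0, 2^18) pages, i.e. up to 1GB, matching the
 * comment above.
 */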
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long base = mm->brk;
	unsigned long ret;

#ifdef CONFIG_PPC_STD_MMU_64
	/*
	 * If we are using 1TB segments and we are allowed to randomise
	 * the heap, we can put it above 1TB so it is backed by a 1TB
	 * segment.  Otherwise the heap will be in the bottom 1TB
	 * which always uses 256MB segments and this may result in a
	 * performance penalty.
	 */
	if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
		base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
#endif

	ret = PAGE_ALIGN(base + brk_rnd());

	if (ret < mm->brk)
		return mm->brk;

	return ret;
}