intvec_32.S
/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for
 * more details.
 *
 * Linux interrupt vectors.
 */

#include <linux/linkage.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/unistd.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/irqflags.h>
#include <asm/atomic_32.h>
#include <asm/asm-offsets.h>
#include <hv/hypervisor.h>
#include <arch/abi.h>
#include <arch/interrupts.h>
#include <arch/spr_def.h>

#define PTREGS_PTR(reg, ptreg) addli reg, sp, C_ABI_SAVE_AREA_SIZE + (ptreg)

#define PTREGS_OFFSET_SYSCALL PTREGS_OFFSET_REG(TREG_SYSCALL_NR)
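
/*
 * On entry to C-callable code, sp sits C_ABI_SAVE_AREA_SIZE bytes below
 * the saved pt_regs (see the stack-layout comments in int_hand below),
 * so PTREGS_PTR(reg, off) computes the address of any pt_regs field
 * with a single addli, e.g. PTREGS_PTR(r0, PTREGS_OFFSET_PC) points
 * r0 at pt_regs->pc.
 */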
        .macro  push_reg reg, ptr=sp, delta=-4
        {
         sw     \ptr, \reg
         addli  \ptr, \ptr, \delta
        }
        .endm

        .macro  pop_reg reg, ptr=sp, delta=4
        {
         lw     \reg, \ptr
         addli  \ptr, \ptr, \delta
        }
        .endm

        .macro  pop_reg_zero reg, zreg, ptr=sp, delta=4
        {
         move   \zreg, zero
         lw     \reg, \ptr
         addi   \ptr, \ptr, \delta
        }
        .endm
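
/*
 * Within a { } bundle the operations issue together and reads see the
 * pre-bundle register values, so "sw \ptr, \reg" stores through the old
 * pointer while addli adjusts it in the same cycle: push_reg stores and
 * then leaves the pointer 4 bytes lower; pop_reg loads and then leaves
 * it 4 bytes higher (or wherever an explicit delta says).
 */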
        .macro  push_extra_callee_saves reg
        PTREGS_PTR(\reg, PTREGS_OFFSET_REG(51))
        push_reg r51, \reg
        push_reg r50, \reg
        push_reg r49, \reg
        push_reg r48, \reg
        push_reg r47, \reg
        push_reg r46, \reg
        push_reg r45, \reg
        push_reg r44, \reg
        push_reg r43, \reg
        push_reg r42, \reg
        push_reg r41, \reg
        push_reg r40, \reg
        push_reg r39, \reg
        push_reg r38, \reg
        push_reg r37, \reg
        push_reg r36, \reg
        push_reg r35, \reg
        push_reg r34, \reg, PTREGS_OFFSET_BASE - PTREGS_OFFSET_REG(34)
        .endm

        .macro  panic str
        .pushsection .rodata, "a"
1:
        .asciz  "\str"
        .popsection
        {
         moveli r0, lo16(1b)
        }
        {
         auli   r0, r0, ha16(1b)
         jal    panic
        }
        .endm
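
/*
 * The moveli/auli pair is the standard two-instruction idiom for
 * materializing a full 32-bit address: moveli loads the low 16 bits
 * (sign-extended) and auli adds the "high adjusted" upper half, which
 * compensates for that sign extension, here leaving the address of the
 * panic string in r0 before tail-calling panic().
 */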
#ifdef __COLLECT_LINKER_FEEDBACK__
        .pushsection .text.intvec_feedback,"ax"
intvec_feedback:
        .popsection
#endif

/*
 * Default interrupt handler.
 *
 * vecnum is where we'll put this code.
 * c_routine is the C routine we'll call.
 *
 * The C routine is passed two arguments:
 * - A pointer to the pt_regs state.
 * - The interrupt vector number.
 *
 * The "processing" argument specifies the code for processing
 * the interrupt. Defaults to "handle_interrupt".
 */
        .macro  int_hand vecnum, vecname, c_routine, processing=handle_interrupt
        .org    (\vecnum << 8)
intvec_\vecname:
        .ifc    \vecnum, INT_SWINT_1
        blz     TREG_SYSCALL_NR_NAME, sys_cmpxchg
        .endif

        /* Temporarily save a register so we have somewhere to work. */
        mtspr   SPR_SYSTEM_SAVE_K_1, r0
        mfspr   r0, SPR_EX_CONTEXT_K_1

        /* The cmpxchg code clears sp to force us to reset it here on fault. */
        {
         bz     sp, 2f
         andi   r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
        }

        .ifc    \vecnum, INT_DOUBLE_FAULT
        /*
         * For double-faults from user-space, fall through to the normal
         * register save and stack setup path. Otherwise, it's the
         * hypervisor giving us one last chance to dump diagnostics, and we
         * branch to the kernel_double_fault routine to do so.
         */
        bz      r0, 1f
        j       _kernel_double_fault
1:
        .else
        /*
         * If we're coming from user-space, then set sp to the top of
         * the kernel stack. Otherwise, assume sp is already valid.
         */
        {
         bnz    r0, 0f
         move   r0, sp
        }
        .endif

        .ifc    \c_routine, do_page_fault
        /*
         * The page_fault handler may be downcalled directly by the
         * hypervisor even when Linux is running and has ICS set.
         *
         * In this case the contents of EX_CONTEXT_K_1 reflect the
         * previous fault and can't be relied on to choose whether or
         * not to reinitialize the stack pointer. So we add a test
         * to see whether SYSTEM_SAVE_K_2 has the high bit set,
         * and if so we don't reinitialize sp, since we must be coming
         * from Linux. (In fact the precise case is !(val & ~1),
         * but any Linux PC has to have the high bit set.)
         *
         * Note that the hypervisor *always* sets SYSTEM_SAVE_K_2 for
         * any path that turns into a downcall to one of our TLB handlers.
         */
        mfspr   r0, SPR_SYSTEM_SAVE_K_2
        {
         blz    r0, 0f  /* high bit in S_S_1_2 is for a PC to use */
         move   r0, sp
        }
        .endif

2:
        /*
         * SYSTEM_SAVE_K_0 holds the cpu number in the low bits, and
         * the current stack top in the higher bits. So we recover
         * our stack top by just masking off the low bits, then
         * point sp at the top aligned address on the actual stack page.
         */
        mfspr   r0, SPR_SYSTEM_SAVE_K_0
        mm      r0, r0, zero, LOG2_NR_CPU_IDS, 31
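        /*
         * "mm" is a masked merge: it takes bits LOG2_NR_CPU_IDS..31
         * from the first source (r0) and the remaining bits from the
         * second (zero), so this single instruction clears the cpu
         * number out of the low bits and leaves just the stack top.
         */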
0:
        /*
         * Align the stack mod 64 so we can properly predict what
         * cache lines we need to write-hint to reduce memory fetch
         * latency as we enter the kernel. The layout of memory is
         * as follows, with cache line 0 at the lowest VA, and cache
         * line 4 just below the r0 value this "andi" computes.
         * Note that we never write to cache line 4, and we skip
         * cache line 1 for syscalls.
         *
         *    cache line 4: ptregs padding (two words)
         *    cache line 3: r46...lr, pc, ex1, faultnum, orig_r0, flags, pad
         *    cache line 2: r30...r45
         *    cache line 1: r14...r29
         *    cache line 0: 2 x frame, r0..r13
         */
#if STACK_TOP_DELTA != 64
#error STACK_TOP_DELTA must be 64 for assumptions here and in task_pt_regs()
#endif
        andi    r0, r0, -64

        /*
         * Push the first four registers on the stack, so that we can set
         * them to vector-unique values before we jump to the common code.
         *
         * Registers are pushed on the stack as a struct pt_regs,
         * with the sp initially just above the struct, and when we're
         * done, sp points to the base of the struct, minus
         * C_ABI_SAVE_AREA_SIZE, so we can directly jal to C code.
         *
         * This routine saves just the first four registers, plus the
         * stack context so we can do proper backtracing right away,
         * and defers to handle_interrupt to save the rest.
         * The backtracer needs pc, ex1, lr, sp, r52, and faultnum.
         */
        addli   r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
        wh64    r0   /* cache line 3 */
        {
         sw     r0, lr
         addli  r0, r0, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
        }
        {
         sw     r0, sp
         addli  sp, r0, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_SP
        }
        {
         sw     sp, r52
         addli  sp, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(52)
        }
        wh64    sp   /* cache line 0 */
        {
         sw     sp, r1
         addli  sp, sp, PTREGS_OFFSET_REG(2) - PTREGS_OFFSET_REG(1)
        }
        {
         sw     sp, r2
         addli  sp, sp, PTREGS_OFFSET_REG(3) - PTREGS_OFFSET_REG(2)
        }
        {
         sw     sp, r3
         addli  sp, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(3)
        }
        mfspr   r0, SPR_EX_CONTEXT_K_0
        .ifc    \processing,handle_syscall
        /*
         * Bump the saved PC by one bundle so that when we return, we won't
         * execute the same swint instruction again. We need to do this while
         * we're in the critical section.
         */
        addi    r0, r0, 8
        .endif
        {
         sw     sp, r0
         addli  sp, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
        }
        mfspr   r0, SPR_EX_CONTEXT_K_1
        {
         sw     sp, r0
         addi   sp, sp, PTREGS_OFFSET_FAULTNUM - PTREGS_OFFSET_EX1

        /*
         * Use r0 for syscalls so it's a temporary; use r1 for interrupts
         * so that it gets passed through unchanged to the handler routine.
         * Note that the .if conditional confusingly spans bundles.
         */
         .ifc   \processing,handle_syscall
         movei  r0, \vecnum
        }
        {
         sw     sp, r0
         .else
         movei  r1, \vecnum
        }
        {
         sw     sp, r1
         .endif
         addli  sp, sp, PTREGS_OFFSET_REG(0) - PTREGS_OFFSET_FAULTNUM
        }
        mfspr   r0, SPR_SYSTEM_SAVE_K_1   /* Original r0 */
        {
         sw     sp, r0
         addi   sp, sp, -PTREGS_OFFSET_REG(0) - 4
        }
        {
         sw     sp, zero        /* write zero into "Next SP" frame pointer */
         addi   sp, sp, -4      /* leave SP pointing at bottom of frame */
        }

        .ifc    \processing,handle_syscall
        j       handle_syscall
        .else
        /*
         * Capture per-interrupt SPR context to registers.
         * We overload the meaning of r3 on this path such that if its bit 31
         * is set, we have to mask all interrupts including NMIs before
         * clearing the interrupt critical section bit.
         * See discussion below at "finish_interrupt_save".
         */
        .ifc    \c_routine, do_page_fault
        mfspr   r2, SPR_SYSTEM_SAVE_K_3   /* address of page fault */
        mfspr   r3, SPR_SYSTEM_SAVE_K_2   /* info about page fault */
        .else
        .ifc    \vecnum, INT_DOUBLE_FAULT
        {
         mfspr  r2, SPR_SYSTEM_SAVE_K_2   /* double fault info from HV */
         movei  r3, 0
        }
        .else
        .ifc    \c_routine, do_trap
        {
         mfspr  r2, GPV_REASON
         movei  r3, 0
        }
        .else
        /*
         * Both perf-count vectors use handle_perf_interrupt as their
         * C routine, so distinguish them by vector number here; testing
         * \c_routine twice would leave the AUX branch unreachable.
         */
        .ifc    \vecnum, INT_PERF_COUNT
        {
         mfspr  r2, PERF_COUNT_STS
         movei  r3, -1   /* not used, but set for consistency */
        }
        .else
        .ifc    \vecnum, INT_AUX_PERF_COUNT
        {
         mfspr  r2, AUX_PERF_COUNT_STS
         movei  r3, -1   /* not used, but set for consistency */
        }
        .else
        movei   r3, 0
        .endif
        .endif
        .endif
        .endif
        .endif

        /* Put function pointer in r0 */
        moveli  r0, lo16(\c_routine)
        {
         auli   r0, r0, ha16(\c_routine)
         j      \processing
        }
        .endif
        ENDPROC(intvec_\vecname)

#ifdef __COLLECT_LINKER_FEEDBACK__
        .pushsection .text.intvec_feedback,"ax"
        .org    (\vecnum << 5)
        FEEDBACK_ENTER_EXPLICIT(intvec_\vecname, .intrpt, 1 << 8)
        jrp     lr
        .popsection
#endif
        .endm
/*
 * Save the rest of the registers that we didn't save in the actual
 * vector itself. We can't use r0-r10 inclusive here.
 */
        .macro  finish_interrupt_save, function

        /* If it's a syscall, save a proper orig_r0, otherwise just zero. */
        PTREGS_PTR(r52, PTREGS_OFFSET_ORIG_R0)
        {
         .ifc   \function,handle_syscall
         sw     r52, r0
         .else
         sw     r52, zero
         .endif
         PTREGS_PTR(r52, PTREGS_OFFSET_TP)
        }

        /*
         * For ordinary syscalls, we save neither caller- nor callee-
         * save registers, since the syscall invoker doesn't expect the
         * caller-saves to be saved, and the called kernel functions will
         * take care of saving the callee-saves for us.
         *
         * For interrupts we save just the caller-save registers. Saving
         * them is required (since the "caller" can't save them). Again,
         * the called kernel functions will restore the callee-save
         * registers for us appropriately.
         *
         * On return, we normally restore nothing special for syscalls,
         * and just the caller-save registers for interrupts.
         *
         * However, there are some important caveats to all this:
         *
         * - We always save a few callee-save registers to give us
         *   some scratchpad registers to carry across function calls.
         *
         * - fork/vfork/etc require us to save all the callee-save
         *   registers, which we do in PTREGS_SYSCALL_ALL_REGS, below.
         *
         * - We always save r0..r5 and r10 for syscalls, since we need
         *   to reload them a bit later for the actual kernel call, and
         *   since we might need them for -ERESTARTNOINTR, etc.
         *
         * - Before invoking a signal handler, we save the unsaved
         *   callee-save registers so they are visible to the
         *   signal handler or any ptracer.
         *
         * - If the unsaved callee-save registers are modified, we set
         *   a bit in pt_regs so we know to reload them from pt_regs
         *   and not just rely on the kernel function unwinding.
         *   (Done for ptrace register writes and SA_SIGINFO handler.)
         */
        {
         sw     r52, tp
         PTREGS_PTR(r52, PTREGS_OFFSET_REG(33))
        }
        wh64    r52   /* cache line 2 */
        push_reg r33, r52
        push_reg r32, r52
        push_reg r31, r52
        .ifc    \function,handle_syscall
        push_reg r30, r52, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(30)
        push_reg TREG_SYSCALL_NR_NAME, r52, \
          PTREGS_OFFSET_REG(5) - PTREGS_OFFSET_SYSCALL
        .else
        push_reg r30, r52, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(30)
        wh64    r52   /* cache line 1 */
        push_reg r29, r52
        push_reg r28, r52
        push_reg r27, r52
        push_reg r26, r52
        push_reg r25, r52
        push_reg r24, r52
        push_reg r23, r52
        push_reg r22, r52
        push_reg r21, r52
        push_reg r20, r52
        push_reg r19, r52
        push_reg r18, r52
        push_reg r17, r52
        push_reg r16, r52
        push_reg r15, r52
        push_reg r14, r52
        push_reg r13, r52
        push_reg r12, r52
        push_reg r11, r52
        push_reg r10, r52
        push_reg r9, r52
        push_reg r8, r52
        push_reg r7, r52
        push_reg r6, r52
        .endif
        push_reg r5, r52
        sw      r52, r4

        /* Load tp with our per-cpu offset. */
#ifdef CONFIG_SMP
        {
         mfspr  r20, SPR_SYSTEM_SAVE_K_0
         moveli r21, lo16(__per_cpu_offset)
        }
        {
         auli   r21, r21, ha16(__per_cpu_offset)
         mm     r20, r20, zero, 0, LOG2_NR_CPU_IDS-1
        }
        s2a     r20, r20, r21
        lw      tp, r20
#else
        move    tp, zero
#endif
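        /*
         * The sequence above computes tp = __per_cpu_offset[cpu]:
         * the mm isolates the cpu number from the low bits of
         * SYSTEM_SAVE_K_0, and s2a (shift-left-2-and-add) scales it
         * by the 4-byte entry size to index the __per_cpu_offset
         * array whose address sits in r21.
         */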
        /*
         * If we will be returning to the kernel, we will need to
         * reset the interrupt masks to the state they had before.
         * Set DISABLE_IRQ in flags iff we came from PL1 with irqs disabled.
         * We load flags in r32 here so we can jump to .Lrestore_regs
         * directly after do_page_fault_ics() if necessary.
         */
        mfspr   r32, SPR_EX_CONTEXT_K_1
        {
         andi   r32, r32, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
         PTREGS_PTR(r21, PTREGS_OFFSET_FLAGS)
        }
        bzt     r32, 1f        /* zero if from user space */
        IRQS_DISABLED(r32)     /* zero if irqs enabled */
#if PT_FLAGS_DISABLE_IRQ != 1
# error Value of IRQS_DISABLED used to set PT_FLAGS_DISABLE_IRQ; fix
#endif
1:
        .ifnc   \function,handle_syscall
        /* Record the fact that we saved the caller-save registers above. */
        ori     r32, r32, PT_FLAGS_CALLER_SAVES
        .endif
        sw      r21, r32

#ifdef __COLLECT_LINKER_FEEDBACK__
        /*
         * Notify the feedback routines that we were in the
         * appropriate fixed interrupt vector area. Note that we
         * still have ICS set at this point, so we can't invoke any
         * atomic operations or we will panic. The feedback
         * routines internally preserve r0..r10 and r30 up.
         */
        .ifnc   \function,handle_syscall
        shli    r20, r1, 5
        .else
        moveli  r20, INT_SWINT_1 << 5
        .endif
        addli   r20, r20, lo16(intvec_feedback)
        auli    r20, r20, ha16(intvec_feedback)
        jalr    r20

        /* And now notify the feedback routines that we are here. */
        FEEDBACK_ENTER(\function)
#endif

        /*
         * We've captured enough state to the stack (including in
         * particular our EX_CONTEXT state) that we can now release
         * the interrupt critical section and replace it with our
         * standard "interrupts disabled" mask value. This allows
         * synchronous interrupts (and profile interrupts) to punch
         * through from this point onwards.
         *
         * If bit 31 of r3 is set during a non-NMI interrupt, we know we
         * are on the path where the hypervisor has punched through our
         * ICS with a page fault, so we call out to do_page_fault_ics()
         * to figure out what to do with it. If the fault was in
         * an atomic op, we unlock the atomic lock, adjust the
         * saved register state a little, and return "zero" in r4,
         * falling through into the normal page-fault interrupt code.
         * If the fault was in a kernel-space atomic operation, then
         * do_page_fault_ics() resolves it itself, returns "one" in r4,
         * and as a result goes directly to restoring registers and iret,
         * without trying to adjust the interrupt masks at all.
         * The do_page_fault_ics() API involves passing and returning
         * a five-word struct (in registers) to avoid writing the
         * save and restore code here.
         */
        .ifc    \function,handle_nmi
        IRQ_DISABLE_ALL(r20)
        .else
        .ifnc   \function,handle_syscall
        bgezt   r3, 1f
        {
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
         jal    do_page_fault_ics
        }
        FEEDBACK_REENTER(\function)
        bzt     r4, 1f
        j       .Lrestore_regs
1:
        .endif
        IRQ_DISABLE(r20, r21)
        .endif
        mtspr   INTERRUPT_CRITICAL_SECTION, zero

        /*
         * Prepare the first 256 stack bytes to be rapidly accessible
         * without having to fetch the background data. We don't really
         * know how far to write-hint, but kernel stacks generally
         * aren't that big, and write-hinting here does take some time.
         */
        addi    r52, sp, -64
        {
         wh64   r52
         addi   r52, r52, -64
        }
        {
         wh64   r52
         addi   r52, r52, -64
        }
        {
         wh64   r52
         addi   r52, r52, -64
        }
        wh64    r52

#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
        .ifnc   \function,handle_nmi
        /*
         * We finally have enough state set up to notify the irq
         * tracing code that irqs were disabled on entry to the handler.
         * The TRACE_IRQS_OFF call clobbers registers r0-r29.
         * For syscalls, we already have the register state saved away
         * on the stack, so we don't bother to do any register saves here,
         * and later we pop the registers back off the kernel stack.
         * For interrupt handlers, save r0-r3 in callee-saved registers.
         */
        .ifnc   \function,handle_syscall
        { move r30, r0; move r31, r1 }
        { move r32, r2; move r33, r3 }
        .endif
        TRACE_IRQS_OFF
#ifdef CONFIG_CONTEXT_TRACKING
        jal     context_tracking_user_exit
#endif
        .ifnc   \function,handle_syscall
        { move r0, r30; move r1, r31 }
        { move r2, r32; move r3, r33 }
        .endif
        .endif
#endif
        .endm
        .macro  check_single_stepping, kind, not_single_stepping
        /*
         * Check for single stepping at user-level privilege.
         * kind can be "normal", "ill", or "syscall".
         * On fall-through, the registers hold:
         *   r29: thread_info->step_state
         *   r28: &pt_regs->pc
         *   r27: pt_regs->pc
         *   r26: thread_info->step_state->buffer
         */

        /* Check for single stepping */
        GET_THREAD_INFO(r29)
        {
         /* Get pointer to field holding step state */
         addi   r29, r29, THREAD_INFO_STEP_STATE_OFFSET

         /* Get pointer to EX1 in register state */
         PTREGS_PTR(r27, PTREGS_OFFSET_EX1)
        }
        {
         /* Get pointer to field holding PC */
         PTREGS_PTR(r28, PTREGS_OFFSET_PC)

         /* Load the pointer to the step state */
         lw     r29, r29
        }

        /* Load EX1 */
        lw      r27, r27
        {
         /* Points to flags */
         addi   r23, r29, SINGLESTEP_STATE_FLAGS_OFFSET

         /* No single stepping if there is no step state structure */
         bzt    r29, \not_single_stepping
        }
        {
         /* mask off ICS and any other high bits */
         andi   r27, r27, SPR_EX_CONTEXT_1_1__PL_MASK

         /* Load pointer to single step instruction buffer */
         lw     r26, r29
        }

        /* Check priv state */
        bnz     r27, \not_single_stepping

        /* Get flags */
        lw      r22, r23
        {
         /* Branch if single-step mode not enabled */
         bbnst  r22, \not_single_stepping

         /* Clear enabled flag */
         andi   r22, r22, ~SINGLESTEP_STATE_MASK_IS_ENABLED
        }

        .ifc    \kind,normal
        {
         /* Load PC */
         lw     r27, r28

         /* Point to the entry containing the original PC */
         addi   r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET
        }
        {
         /* Disable single stepping flag */
         sw     r23, r22
        }
        {
         /* Get the original pc */
         lw     r24, r24

         /* See if the PC is at the start of the single step buffer */
         seq    r25, r26, r27
        }

        /*
         * NOTE: it is really expected that the PC be in the single step buffer
         * at this point
         */
        bzt     r25, \not_single_stepping

        /* Restore the original PC */
        sw      r28, r24
        .else
        .ifc    \kind,syscall
        {
         /* Load PC */
         lw     r27, r28

         /* Point to the entry containing the next PC */
         addi   r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
        }
        {
         /* Point one bundle past the start of the buffer */
         addi   r26, r26, 8

         /* Disable single stepping flag */
         sw     r23, r22
        }
        {
         /* Get the next pc */
         lw     r24, r24

         /*
          * See if the PC is one bundle past the start of the
          * single step buffer
          */
         seq    r25, r26, r27
        }
        {
         /*
          * NOTE: it is really expected that the PC be in the
          * single step buffer at this point
          */
         bzt    r25, \not_single_stepping
        }

        /* Set to the next PC */
        sw      r28, r24
        .else
        {
         /* Point to 3rd bundle in buffer */
         addi   r25, r26, 16

         /* Load PC */
         lw     r27, r28
        }
        {
         /* Disable single stepping flag */
         sw     r23, r22

         /* See if the PC is in the single step buffer */
         slte_u r24, r26, r27
        }
        {
         slte_u r25, r27, r25

         /*
          * NOTE: it is really expected that the PC be in the
          * single step buffer at this point
          */
         bzt    r24, \not_single_stepping
        }
        bzt     r25, \not_single_stepping
        .endif
        .endif
        .endm
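
/*
 * Summary of the three variants: "normal" rewinds pt_regs->pc to the
 * instruction's original address (step_state->orig_pc), "syscall"
 * advances it to the following instruction (step_state->next_pc), and
 * the remaining ("ill") case only range-checks the PC against the
 * step buffer, leaving handle_ill to decode which bundle trapped.
 */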
/*
 * Redispatch a downcall.
 */
        .macro  dc_dispatch vecnum, vecname
        .org    (\vecnum << 8)
intvec_\vecname:
        j       _hv_downcall_dispatch
        ENDPROC(intvec_\vecname)
        .endm

/*
 * Common code for most interrupts. The C function we're eventually
 * going to is in r0, and the faultnum is in r1; the original
 * values for those registers are on the stack.
 */
        .pushsection .text.handle_interrupt,"ax"
handle_interrupt:
        finish_interrupt_save handle_interrupt

        /*
         * Check if we are single-stepping in user space. If so, we
         * need to restore the PC.
         */
        check_single_stepping normal, .Ldispatch_interrupt
.Ldispatch_interrupt:

        /* Jump to the C routine; it should enable irqs as soon as possible. */
        {
         jalr   r0
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
        }
        FEEDBACK_REENTER(handle_interrupt)
        {
         movei  r30, 0   /* not an NMI */
         j      interrupt_return
        }
        STD_ENDPROC(handle_interrupt)
/*
 * This routine takes a boolean in r30 indicating if this is an NMI.
 * If so, we also expect a boolean in r31 indicating whether to
 * re-enable the oprofile interrupts.
 *
 * Note that .Lresume_userspace is jumped to directly in several
 * places, and we need to make sure r30 is set correctly in those
 * callers as well.
 */
STD_ENTRY(interrupt_return)
        /* If we're resuming to kernel space, don't check thread flags. */
        {
         bnz    r30, .Lrestore_all   /* NMIs don't special-case user-space */
         PTREGS_PTR(r29, PTREGS_OFFSET_EX1)
        }
        lw      r29, r29
        andi    r29, r29, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
        bzt     r29, .Lresume_userspace

#ifdef CONFIG_PREEMPT
        /* Returning to kernel space. Check if we need preemption. */
        GET_THREAD_INFO(r29)
        addli   r28, r29, THREAD_INFO_FLAGS_OFFSET
        {
         lw     r28, r28
         addli  r29, r29, THREAD_INFO_PREEMPT_COUNT_OFFSET
        }
        {
         andi   r28, r28, _TIF_NEED_RESCHED
         lw     r29, r29
        }
        bzt     r28, 1f
        bnz     r29, 1f

        /* Disable interrupts explicitly for preemption. */
        IRQ_DISABLE(r20,r21)
        TRACE_IRQS_OFF
        jal     preempt_schedule_irq
        FEEDBACK_REENTER(interrupt_return)
1:
#endif

        /* If we're resuming to _cpu_idle_nap, bump PC forward by 8. */
        {
         PTREGS_PTR(r29, PTREGS_OFFSET_PC)
         moveli r27, lo16(_cpu_idle_nap)
        }
        {
         lw     r28, r29
         auli   r27, r27, ha16(_cpu_idle_nap)
        }
        {
         seq    r27, r27, r28
        }
        {
         bbns   r27, .Lrestore_all
         addi   r28, r28, 8
        }
        sw      r29, r28
        j       .Lrestore_all

.Lresume_userspace:
        FEEDBACK_REENTER(interrupt_return)

        /*
         * Disable interrupts so as to make sure we don't
         * miss an interrupt that sets any of the thread flags (like
         * need_resched or sigpending) between sampling and the iret.
         * Routines like schedule() or do_signal() may re-enable
         * interrupts before returning.
         */
        IRQ_DISABLE(r20, r21)
        TRACE_IRQS_OFF   /* Note: clobbers registers r0-r29 */

        /*
         * See if there are any work items (including single-shot items)
         * to do. If so, save the callee-save registers to pt_regs
         * and then dispatch to C code.
         */
        GET_THREAD_INFO(r21)
        {
         addi   r22, r21, THREAD_INFO_FLAGS_OFFSET
         moveli r20, lo16(_TIF_ALLWORK_MASK)
        }
        {
         lw     r22, r22
         auli   r20, r20, ha16(_TIF_ALLWORK_MASK)
        }
        and     r1, r22, r20
        {
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
         bzt    r1, .Lrestore_all
        }
        push_extra_callee_saves r0
        jal     prepare_exit_to_usermode

        /*
         * In the NMI case we omit the call to single_process_check_nohz,
         * which normally checks to see if we should start or stop the
         * scheduler tick, because we can't call arbitrary Linux code from
         * an NMI context. We always call the homecache TLB deferral code
         * to re-trigger the deferral mechanism.
         *
         * The other chunk of responsibility this code has is to reset the
         * interrupt masks appropriately to reset irqs and NMIs. We have
         * to call TRACE_IRQS_OFF and TRACE_IRQS_ON to support all the
         * lockdep-type stuff, but we can't set ICS until afterwards, since
         * ICS can only be used in very tight chunks of code to avoid
         * tripping over various assertions that it is off.
         *
         * (There is what looks like a window of vulnerability here since
         * we might take a profile interrupt between the two SPR writes
         * that set the mask, but since we write the low SPR word first,
         * and our interrupt entry code checks the low SPR word, any
         * profile interrupt will actually disable interrupts in both SPRs
         * before returning, which is OK.)
         */
.Lrestore_all:
        PTREGS_PTR(r0, PTREGS_OFFSET_EX1)
        {
         lw     r0, r0
         PTREGS_PTR(r32, PTREGS_OFFSET_FLAGS)
        }
        {
         andi   r0, r0, SPR_EX_CONTEXT_1_1__PL_MASK
         lw     r32, r32
        }
        bnz     r0, 1f
        j       2f
#if PT_FLAGS_DISABLE_IRQ != 1
# error Assuming PT_FLAGS_DISABLE_IRQ == 1 so we can use bbnst below
#endif
1:      bbnst   r32, 2f
        IRQ_DISABLE(r20,r21)
        TRACE_IRQS_OFF
        movei   r0, 1
        mtspr   INTERRUPT_CRITICAL_SECTION, r0
        bzt     r30, .Lrestore_regs
        j       3f
2:      TRACE_IRQS_ON
        movei   r0, 1
        mtspr   INTERRUPT_CRITICAL_SECTION, r0
        IRQ_ENABLE(r20, r21)
        bzt     r30, .Lrestore_regs
3:
        /* We are relying on INT_PERF_COUNT at 33, and AUX_PERF_COUNT at 48 */
        {
         moveli r0, lo16(1 << (INT_PERF_COUNT - 32))
         bz     r31, .Lrestore_regs
        }
        auli    r0, r0, ha16(1 << (INT_AUX_PERF_COUNT - 32))
        mtspr   SPR_INTERRUPT_MASK_RESET_K_1, r0

        /*
         * We now commit to returning from this interrupt, since we will be
         * doing things like setting EX_CONTEXT SPRs and unwinding the stack
         * frame. No calls should be made to any other code after this point.
         * This code should only be entered with ICS set.
         * r32 must still be set to ptregs.flags.
         * We launch loads to each cache line separately first, so we can
         * get some parallelism out of the memory subsystem.
         * We start zeroing caller-saved registers throughout, since
         * that will save some cycles if this turns out to be a syscall.
         */
.Lrestore_regs:
        FEEDBACK_REENTER(interrupt_return)   /* called from elsewhere */

        /*
         * Rotate so we have one high bit and one low bit to test.
         * - low bit says whether to restore all the callee-saved registers,
         *   or just r30-r33, and r52 up.
         * - high bit (i.e. sign bit) says whether to restore all the
         *   caller-saved registers, or just r0.
         */
#if PT_FLAGS_CALLER_SAVES != 2 || PT_FLAGS_RESTORE_REGS != 4
# error Rotate trick does not work :-)
#endif
        {
         rli    r20, r32, 30
         PTREGS_PTR(sp, PTREGS_OFFSET_REG(0))
        }
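        /*
         * Rotating left by 30 moves flags bit 2 (PT_FLAGS_RESTORE_REGS)
         * around to bit 0, where bbs can test it, and bit 1
         * (PT_FLAGS_CALLER_SAVES) up to bit 31, where blzt can test the
         * sign, so one rli serves both branches below.
         */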
        /*
         * Load cache lines 0, 2, and 3 in that order, then use
         * the last loaded value, which makes it likely that the other
         * cache lines have also loaded, at which point we should be
         * able to safely read all the remaining words on those cache
         * lines without waiting for the memory subsystem.
         */
        pop_reg_zero r0, r28, sp, PTREGS_OFFSET_REG(30) - PTREGS_OFFSET_REG(0)
        pop_reg_zero r30, r2, sp, PTREGS_OFFSET_PC - PTREGS_OFFSET_REG(30)
        pop_reg_zero r21, r3, sp, PTREGS_OFFSET_EX1 - PTREGS_OFFSET_PC
        pop_reg_zero lr, r4, sp, PTREGS_OFFSET_REG(52) - PTREGS_OFFSET_EX1
        {
         mtspr  SPR_EX_CONTEXT_K_0, r21
         move   r5, zero
        }
        {
         mtspr  SPR_EX_CONTEXT_K_1, lr
         andi   lr, lr, SPR_EX_CONTEXT_1_1__PL_MASK  /* mask off ICS */
        }

        /* Restore callee-saveds that we actually use. */
        pop_reg_zero r52, r6, sp, PTREGS_OFFSET_REG(31) - PTREGS_OFFSET_REG(52)
        pop_reg_zero r31, r7
        pop_reg_zero r32, r8
        pop_reg_zero r33, r9, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(33)

        /*
         * If we modified other callee-saveds, restore them now.
         * This is rare, but could be via ptrace or signal handler.
         */
        {
         move   r10, zero
         bbs    r20, .Lrestore_callees
        }
.Lcontinue_restore_regs:

        /* Check if we're returning from a syscall. */
        {
         move   r11, zero
         blzt   r20, 1f   /* no, so go restore caller-save registers */
        }

        /*
         * Check if we're returning to userspace.
         * Note that if we're not, we don't worry about zeroing everything.
         */
        {
         addli  sp, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(29)
         bnz    lr, .Lkernel_return
        }

        /*
         * On return from syscall, we've restored r0 from pt_regs, but we
         * clear the remainder of the caller-saved registers. We could
         * restore the syscall arguments, but there's not much point;
         * clearing them keeps user programs from relying on the
         * caller-saves and avoids leaking kernel pointers into userspace.
         */
        pop_reg_zero lr, r12, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
        pop_reg_zero tp, r13, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
        {
         lw     sp, sp
         move   r14, zero
         move   r15, zero
        }
        { move r16, zero; move r17, zero }
        { move r18, zero; move r19, zero }
        { move r20, zero; move r21, zero }
        { move r22, zero; move r23, zero }
        { move r24, zero; move r25, zero }
        { move r26, zero; move r27, zero }

        /* Set r1 to errno if we are returning an error, otherwise zero. */
        {
         moveli r29, 4096
         sub    r1, zero, r0
        }
        slt_u   r29, r1, r29
        {
         mnz    r1, r29, r1
         move   r29, zero
        }
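        /*
         * Syscalls signal errors by returning a negative errno in the
         * range -4095..-1. The sequence above computes r1 = -r0 and
         * then uses slt_u/mnz to keep that value only when it is an
         * unsigned value below 4096; any legitimate large result (such
         * as a mmap address) fails the test and leaves r1 = 0.
         */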
        iret

        /*
         * Not a syscall, so restore caller-saved registers.
         * First kick off a load for cache line 1, which we're touching
         * for the first time here.
         */
        .align 64
1:      pop_reg r29, sp, PTREGS_OFFSET_REG(1) - PTREGS_OFFSET_REG(29)
        pop_reg r1
        pop_reg r2
        pop_reg r3
        pop_reg r4
        pop_reg r5
        pop_reg r6
        pop_reg r7
        pop_reg r8
        pop_reg r9
        pop_reg r10
        pop_reg r11
        pop_reg r12
        pop_reg r13
        pop_reg r14
        pop_reg r15
        pop_reg r16
        pop_reg r17
        pop_reg r18
        pop_reg r19
        pop_reg r20
        pop_reg r21
        pop_reg r22
        pop_reg r23
        pop_reg r24
        pop_reg r25
        pop_reg r26
        pop_reg r27
        pop_reg r28, sp, PTREGS_OFFSET_LR - PTREGS_OFFSET_REG(28)
        /* r29 already restored above */
        bnz     lr, .Lkernel_return
        pop_reg lr, sp, PTREGS_OFFSET_TP - PTREGS_OFFSET_LR
        pop_reg tp, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_TP
        lw      sp, sp
        iret

        /*
         * We can't restore tp when in kernel mode, since a thread might
         * have migrated from another cpu and brought a stale tp value.
         */
.Lkernel_return:
        pop_reg lr, sp, PTREGS_OFFSET_SP - PTREGS_OFFSET_LR
        lw      sp, sp
        iret

        /* Restore callee-saved registers from r34 to r51. */
.Lrestore_callees:
        addli   sp, sp, PTREGS_OFFSET_REG(34) - PTREGS_OFFSET_REG(29)
        pop_reg r34
        pop_reg r35
        pop_reg r36
        pop_reg r37
        pop_reg r38
        pop_reg r39
        pop_reg r40
        pop_reg r41
        pop_reg r42
        pop_reg r43
        pop_reg r44
        pop_reg r45
        pop_reg r46
        pop_reg r47
        pop_reg r48
        pop_reg r49
        pop_reg r50
        pop_reg r51, sp, PTREGS_OFFSET_REG(29) - PTREGS_OFFSET_REG(51)
        j       .Lcontinue_restore_regs
        STD_ENDPROC(interrupt_return)
/*
 * Some interrupts don't check for single stepping
 */
        .pushsection .text.handle_interrupt_no_single_step,"ax"
handle_interrupt_no_single_step:
        finish_interrupt_save handle_interrupt_no_single_step
        {
         jalr   r0
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
        }
        FEEDBACK_REENTER(handle_interrupt_no_single_step)
        {
         movei  r30, 0   /* not an NMI */
         j      interrupt_return
        }
        STD_ENDPROC(handle_interrupt_no_single_step)

/*
 * "NMI" interrupts mask ALL interrupts before calling the
 * handler, and don't check thread flags, etc., on the way
 * back out. In general, the only things we do here for NMIs
 * are the register save/restore, fixing the PC if we were
 * doing single step, and the dataplane kernel-TLB management.
 * We don't (for example) deal with start/stop of the sched tick.
 */
        .pushsection .text.handle_nmi,"ax"
handle_nmi:
        finish_interrupt_save handle_nmi
        check_single_stepping normal, .Ldispatch_nmi
.Ldispatch_nmi:
        {
         jalr   r0
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
        }
        FEEDBACK_REENTER(handle_nmi)
        {
         movei  r30, 1
         seq    r31, r0, zero
        }
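        /*
         * r31 = (handler returned zero): interrupt_return uses it to
         * decide whether to unmask the perf-count interrupts again via
         * SPR_INTERRUPT_MASK_RESET_K_1 (see the "3:" path above).
         */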
        j       interrupt_return
        STD_ENDPROC(handle_nmi)

/*
 * Parallel code for syscalls to handle_interrupt.
 */
        .pushsection .text.handle_syscall,"ax"
handle_syscall:
        finish_interrupt_save handle_syscall

        /*
         * Check if we are single-stepping in user space. If so, we
         * need to restore the PC.
         */
        check_single_stepping syscall, .Ldispatch_syscall
.Ldispatch_syscall:

        /* Enable irqs. */
        TRACE_IRQS_ON
        IRQ_ENABLE(r20, r21)

        /* Bump the counter for syscalls made on this tile. */
        moveli  r20, lo16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
        auli    r20, r20, ha16(irq_stat + IRQ_CPUSTAT_SYSCALL_COUNT_OFFSET)
        add     r20, r20, tp
        lw      r21, r20
        addi    r21, r21, 1
        {
         sw     r20, r21
         GET_THREAD_INFO(r31)
        }
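        /*
         * irq_stat is a per-cpu variable, so its link-time address plus
         * the per-cpu offset that finish_interrupt_save left in tp
         * yields this cpu's private copy of the syscall counter.
         */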
        /* Trace syscalls, if requested. */
        addi    r31, r31, THREAD_INFO_FLAGS_OFFSET
        lw      r30, r31
        andi    r30, r30, _TIF_SYSCALL_TRACE
        bzt     r30, .Lrestore_syscall_regs
        {
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
         jal    do_syscall_trace_enter
        }
        FEEDBACK_REENTER(handle_syscall)
        blz     r0, .Lsyscall_sigreturn_skip

        /*
         * We always reload our registers from the stack at this
         * point. They might be valid, if we didn't build with
         * TRACE_IRQFLAGS, and this isn't a dataplane tile, and we're not
         * doing syscall tracing, but there are enough cases now that it
         * seems simplest just to do the reload unconditionally.
         */
.Lrestore_syscall_regs:
        PTREGS_PTR(r11, PTREGS_OFFSET_REG(0))
        pop_reg r0, r11
        pop_reg r1, r11
        pop_reg r2, r11
        pop_reg r3, r11
        pop_reg r4, r11
        pop_reg r5, r11, PTREGS_OFFSET_SYSCALL - PTREGS_OFFSET_REG(5)
        pop_reg TREG_SYSCALL_NR_NAME, r11

        /* Ensure that the syscall number is within the legal range. */
        moveli  r21, __NR_syscalls
        {
         slt_u  r21, TREG_SYSCALL_NR_NAME, r21
         moveli r20, lo16(sys_call_table)
        }
        {
         bbns   r21, .Linvalid_syscall
         auli   r20, r20, ha16(sys_call_table)
        }
        s2a     r20, TREG_SYSCALL_NR_NAME, r20
        lw      r20, r20

        /* Jump to syscall handler. */
        jalr    r20
.Lhandle_syscall_link:   /* value of "lr" after "jalr r20" above */

        /*
         * Write our r0 onto the stack so it gets restored instead
         * of whatever the user had there before.
         */
        PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
        sw      r29, r0

.Lsyscall_sigreturn_skip:
        FEEDBACK_REENTER(handle_syscall)

        /* Do syscall trace again, if requested. */
        lw      r30, r31
        andi    r30, r30, _TIF_SYSCALL_TRACE
        bzt     r30, 1f
        {
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
         jal    do_syscall_trace_exit
        }
        FEEDBACK_REENTER(handle_syscall)
1:      {
         movei  r30, 0   /* not an NMI */
         j      .Lresume_userspace   /* jump into middle of interrupt_return */
        }

.Linvalid_syscall:
        /* Report an invalid syscall back to the user program */
        {
         PTREGS_PTR(r29, PTREGS_OFFSET_REG(0))
         movei  r28, -ENOSYS
        }
        sw      r29, r28
        {
         movei  r30, 0   /* not an NMI */
         j      .Lresume_userspace   /* jump into middle of interrupt_return */
        }
        STD_ENDPROC(handle_syscall)
/* Return the address for oprofile to suppress in backtraces. */
STD_ENTRY_SECTION(handle_syscall_link_address, .text.handle_syscall)
        lnk     r0
        {
         addli  r0, r0, .Lhandle_syscall_link - .
         jrp    lr
        }
        STD_ENDPROC(handle_syscall_link_address)

STD_ENTRY(ret_from_fork)
        jal     sim_notify_fork
        jal     schedule_tail
        FEEDBACK_REENTER(ret_from_fork)
        {
         movei  r30, 0   /* not an NMI */
         j      .Lresume_userspace   /* jump into middle of interrupt_return */
        }
        STD_ENDPROC(ret_from_fork)

STD_ENTRY(ret_from_kernel_thread)
        jal     sim_notify_fork
        jal     schedule_tail
        FEEDBACK_REENTER(ret_from_fork)
        {
         move   r0, r31
         jalr   r30
        }
        FEEDBACK_REENTER(ret_from_kernel_thread)
        {
         movei  r30, 0   /* not an NMI */
         j      interrupt_return
        }
        STD_ENDPROC(ret_from_kernel_thread)
/*
 * Code for ill interrupt.
 */
        .pushsection .text.handle_ill,"ax"
handle_ill:
        finish_interrupt_save handle_ill

        /*
         * Check if we are single-stepping in user space. If so, we
         * need to restore the PC.
         */
        check_single_stepping ill, .Ldispatch_normal_ill
        {
         /* See if the PC is the 1st bundle in the buffer */
         seq    r25, r27, r26

         /* Point to the 2nd bundle in the buffer */
         addi   r26, r26, 8
        }
        {
         /* Point to the original pc */
         addi   r24, r29, SINGLESTEP_STATE_ORIG_PC_OFFSET

         /* Branch if the PC is the 1st bundle in the buffer */
         bnz    r25, 3f
        }
        {
         /* See if the PC is the 2nd bundle of the buffer */
         seq    r25, r27, r26

         /* Set PC to next instruction */
         addi   r24, r29, SINGLESTEP_STATE_NEXT_PC_OFFSET
        }
        {
         /* Point to flags */
         addi   r25, r29, SINGLESTEP_STATE_FLAGS_OFFSET

         /*
          * Branch if the PC is not in the second bundle (the bz tests
          * the pre-bundle value of r25, i.e. the seq result above).
          */
         bz     r25, 2f
        }

        /* Load flags */
        lw      r25, r25
        {
         /*
          * Get the offset for the register to restore.
          * Note: the lower bound is 2, so we have implicit scaling by 4.
          * No multiplication of the register number by the size of a register
          * is needed.
          */
         mm     r27, r25, zero, SINGLESTEP_STATE_TARGET_LB, \
                SINGLESTEP_STATE_TARGET_UB

         /* Mask Rewrite_LR */
         andi   r25, r25, SINGLESTEP_STATE_MASK_UPDATE
        }
        {
         addi   r29, r29, SINGLESTEP_STATE_UPDATE_VALUE_OFFSET

         /* Don't rewrite temp register */
         bz     r25, 3f
        }
        {
         /* Get the temp value */
         lw     r29, r29

         /* Point to where the register is stored */
         add    r27, r27, sp
        }

        /* Add in the C ABI save area size to the register offset */
        addi    r27, r27, C_ABI_SAVE_AREA_SIZE

        /* Restore the user's register with the temp value */
        sw      r27, r29
        j       3f

2:
        /* Must be in the third bundle */
        addi    r24, r29, SINGLESTEP_STATE_BRANCH_NEXT_PC_OFFSET

3:
        /* Set PC and continue */
        lw      r26, r24
        {
         sw     r28, r26
         GET_THREAD_INFO(r0)
        }

        /*
         * Clear TIF_SINGLESTEP to prevent recursion if we execute an ill.
         * The normal non-arch flow redundantly clears TIF_SINGLESTEP, but we
         * need to clear it here and can't really impose on all other arches.
         * So what's another write between friends?
         */
        addi    r1, r0, THREAD_INFO_FLAGS_OFFSET
        {
         lw     r2, r1
         addi   r0, r0, THREAD_INFO_TASK_OFFSET   /* currently a no-op */
        }
        andi    r2, r2, ~_TIF_SINGLESTEP
        sw      r1, r2

        /* Issue a sigtrap */
        {
         lw     r0, r0   /* indirect through thread_info to get the task pointer */
         addi   r1, sp, C_ABI_SAVE_AREA_SIZE   /* put ptregs pointer into r1 */
        }
        jal     send_sigtrap   /* issue a SIGTRAP */
        FEEDBACK_REENTER(handle_ill)
        {
         movei  r30, 0   /* not an NMI */
         j      .Lresume_userspace   /* jump into middle of interrupt_return */
        }

.Ldispatch_normal_ill:
        {
         jalr   r0
         PTREGS_PTR(r0, PTREGS_OFFSET_BASE)
        }
        FEEDBACK_REENTER(handle_ill)
        {
         movei  r30, 0   /* not an NMI */
         j      interrupt_return
        }
        STD_ENDPROC(handle_ill)
/* Various stub interrupt handlers and syscall handlers */

STD_ENTRY_LOCAL(_kernel_double_fault)
        mfspr   r1, SPR_EX_CONTEXT_K_0
        move    r2, lr
        move    r3, sp
        move    r4, r52
        addi    sp, sp, -C_ABI_SAVE_AREA_SIZE
        j       kernel_double_fault
        STD_ENDPROC(_kernel_double_fault)

STD_ENTRY_LOCAL(bad_intr)
        mfspr   r2, SPR_EX_CONTEXT_K_0
        panic   "Unhandled interrupt %#x: PC %#lx"
        STD_ENDPROC(bad_intr)

/*
 * Special-case sigreturn to not write r0 to the stack on return.
 * This is technically more efficient, but it also avoids difficulties
 * in the 64-bit OS when handling 32-bit compat code, since we must not
 * sign-extend r0 for the sigreturn return-value case.
 */
#define PTREGS_SYSCALL_SIGRETURN(x, reg)                \
        STD_ENTRY(_##x);                                \
        addli   lr, lr, .Lsyscall_sigreturn_skip - .Lhandle_syscall_link; \
        {                                               \
         PTREGS_PTR(reg, PTREGS_OFFSET_BASE);           \
         j      x                                       \
        };                                              \
        STD_ENDPROC(_##x)

PTREGS_SYSCALL_SIGRETURN(sys_rt_sigreturn, r0)
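
/*
 * The addli retargets lr: the wrapper is reached via "jalr r20" from
 * .Lhandle_syscall_link, so shifting lr by the distance to
 * .Lsyscall_sigreturn_skip makes the eventual return from the real
 * sys_rt_sigreturn land past the "sw r29, r0" that would otherwise
 * clobber the restored r0 in pt_regs.
 */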
  1362. /* Save additional callee-saves to pt_regs and jump to standard function. */
  1363. STD_ENTRY(_sys_clone)
  1364. push_extra_callee_saves r4
  1365. j sys_clone
  1366. STD_ENDPROC(_sys_clone)
  1367. /*
  1368. * This entrypoint is taken for the cmpxchg and atomic_update fast
  1369. * swints. We may wish to generalize it to other fast swints at some
  1370. * point, but for now there are just two very similar ones, which
  1371. * makes it faster.
  1372. *
  1373. * The fast swint code is designed to have a small footprint. It does
  1374. * not save or restore any GPRs, counting on the caller-save registers
  1375. * to be available to it on entry. It does not modify any callee-save
  1376. * registers (including "lr"). It does not check what PL it is being
  1377. * called at, so you'd better not call it other than at PL0.
  1378. * The <atomic.h> wrapper assumes it only clobbers r20-r29, so if
  1379. * it ever is necessary to use more registers, be aware.
  1380. *
  1381. * It does not use the stack, but since it might be re-interrupted by
  1382. * a page fault which would assume the stack was valid, it does
  1383. * save/restore the stack pointer and zero it out to make sure it gets reset.
  1384. * Since we always keep interrupts disabled, the hypervisor won't
  1385. * clobber our EX_CONTEXT_K_x registers, so we don't save/restore them
  1386. * (other than to advance the PC on return).
  1387. *
  1388. * We have to manually validate the user vs kernel address range
  1389. * (since at PL1 we can read/write both), and for performance reasons
  1390. * we don't allow cmpxchg on the fc000000 memory region, since we only
  1391. * validate that the user address is below PAGE_OFFSET.
  1392. *
  1393. * We place it in the __HEAD section to ensure it is relatively
  1394. * near to the intvec_SWINT_1 code (reachable by a conditional branch).
  1395. *
  1396. * Our use of ATOMIC_LOCK_REG here must match do_page_fault_ics().
  1397. *
  1398. * As we do in lib/atomic_asm_32.S, we bypass a store if the value we
  1399. * would store is the same as the value we just loaded.
  1400. */
  1401. __HEAD
  1402. .align 64
  1403. /* Align much later jump on the start of a cache line. */
  1404. nop
  1405. #if PAGE_SIZE >= 0x10000
  1406. nop
  1407. #endif
  1408. ENTRY(sys_cmpxchg)
  1409. /*
  1410. * Save "sp" and set it zero for any possible page fault.
  1411. *
  1412. * HACK: We want to both zero sp and check r0's alignment,
  1413. * so we do both at once. If "sp" becomes nonzero we
  1414. * know r0 is unaligned and branch to the error handler that
  1415. * restores sp, so this is OK.
  1416. *
  1417. * ICS is disabled right now so having a garbage but nonzero
  1418. * sp is OK, since we won't execute any faulting instructions
  1419. * when it is nonzero.
  1420. */
  1421. {
  1422. move r27, sp
  1423. andi sp, r0, 3
  1424. }
  1425. /*
  1426. * Get the lock address in ATOMIC_LOCK_REG, and also validate that the
  1427. * address is less than PAGE_OFFSET, since that won't trap at PL1.
  1428. * We only use bits less than PAGE_SHIFT to avoid having to worry
  1429. * about aliasing among multiple mappings of the same physical page,
  1430. * and we ignore the low 3 bits so we have one lock that covers
  1431. * both a cmpxchg64() and a cmpxchg() on either its low or high word.
  1432. * NOTE: this must match __atomic_hashed_lock() in lib/atomic_32.c.
  1433. */
  1434. #if (PAGE_OFFSET & 0xffff) != 0
  1435. # error Code here assumes PAGE_OFFSET can be loaded with just hi16()
  1436. #endif
  1437. {
  1438. /* Check for unaligned input. */
  1439. bnz sp, .Lcmpxchg_badaddr
  1440. auli r23, zero, hi16(PAGE_OFFSET) /* hugepage-aligned */
  1441. }
  1442. {
  1443. /*
  1444. * Slide bits into position for 'mm'. We want to ignore
  1445. * the low 3 bits of r0, and consider only the next
  1446. * ATOMIC_HASH_SHIFT bits.
  1447. * Because of C pointer arithmetic, we want to compute this:
  1448. *
  1449. * ((char*)atomic_locks +
  1450. * (((r0 >> 3) & ((1 << ATOMIC_HASH_SHIFT) - 1)) << 2))
  1451. *
  1452. * Instead of two shifts we just ">> 1", and use 'mm'
  1453. * to ignore the low and high bits we don't want.
  1454. */
  1455. shri r25, r0, 1
  1456. slt_u r23, r0, r23
  1457. /*
  1458. * Ensure that the TLB is loaded before we take out the lock.
  1459. * This will start fetching the value all the way into our L1
  1460. * as well (and if it gets modified before we grab the lock,
  1461. * it will be invalidated from our cache before we reload it).
  1462. */
  1463. lw r26, r0
  1464. }
        {
         auli   r21, zero, ha16(atomic_locks)

         bbns   r23, .Lcmpxchg_badaddr
        }
#if PAGE_SIZE < 0x10000
        /* atomic_locks is page-aligned so for big pages we don't need this. */
        addli  r21, r21, lo16(atomic_locks)
#endif
        {
         /*
          * Insert the hash bits into the page-aligned pointer.
          * ATOMIC_HASH_SHIFT is so big that we don't actually hash
          * the unmasked address bits, as that may cause unnecessary
          * collisions.
          */
         mm     ATOMIC_LOCK_REG_NAME, r25, r21, 2, (ATOMIC_HASH_SHIFT + 2) - 1

         seqi   r23, TREG_SYSCALL_NR_NAME, __NR_FAST_cmpxchg64
        }
        {
         /* Branch away at this point if we're doing a 64-bit cmpxchg. */
         bbs    r23, .Lcmpxchg64
         andi   r23, r0, 7      /* Precompute alignment for cmpxchg64. */
        }
        {
         /*
          * We very carefully align the code that actually runs with
          * the lock held (twelve bundles) so that we know it is all in
          * the icache when we start.  This instruction (the jump) is
          * at the start of the first cache line, address zero mod 64;
          * we jump to the very end of the second cache line to get that
          * line loaded in the icache, then fall through to issue the tns
          * in the third cache line, at which point it's all cached.
          * Note that this is for performance, not correctness.
          */
         j      .Lcmpxchg32_tns
        }
        /* Symbol for do_page_fault_ics() to use to compare against the PC. */
        .global __sys_cmpxchg_grab_lock
__sys_cmpxchg_grab_lock:

        /*
         * Perform the actual cmpxchg or atomic_update.
         */
.Ldo_cmpxchg32:
        {
         lw     r21, r0
         seqi   r23, TREG_SYSCALL_NR_NAME, __NR_FAST_atomic_update
         move   r24, r2
        }
        {
         seq    r22, r21, r1    /* See if cmpxchg matches. */
         and    r25, r21, r1    /* If atomic_update, compute (*mem & mask) */
        }
        {
         or     r22, r22, r23   /* Skip compare branch for atomic_update. */
         add    r25, r25, r2    /* Compute (*mem & mask) + addend. */
        }
        {
         mvnz   r24, r23, r25   /* Use atomic_update value if appropriate. */
         bbns   r22, .Lcmpxchg32_nostore
        }
        seq    r22, r24, r21    /* Are we storing the value we loaded? */
        bbs    r22, .Lcmpxchg32_nostore
        sw     r0, r24
        /* The following instruction is the start of the second cache line. */
        /* Do the slow mtspr here so the following "mf" waits less. */
        {
         move   sp, r27
         mtspr  SPR_EX_CONTEXT_K_0, r28
        }
        mf

        {
         move   r0, r21
         sw     ATOMIC_LOCK_REG_NAME, zero
        }
        iret
        /* Duplicated code here in the case where we don't overlap "mf" */
.Lcmpxchg32_nostore:
        {
         move   r0, r21
         sw     ATOMIC_LOCK_REG_NAME, zero
        }
        {
         move   sp, r27
         mtspr  SPR_EX_CONTEXT_K_0, r28
        }
        iret
        /*
         * The locking code is the same for 32-bit cmpxchg/atomic_update,
         * and for 64-bit cmpxchg.  We provide it as a macro and put
         * it into both versions.  We can't share the code literally
         * since it depends on having the right branch-back address.
         */
        .macro  cmpxchg_lock, bitwidth
        /* Lock; if we succeed, jump back up to the read-modify-write. */
#ifdef CONFIG_SMP
        tns    r21, ATOMIC_LOCK_REG_NAME
#else
        /*
         * Non-SMP preserves all the lock infrastructure, to keep the
         * code simpler for the interesting (SMP) case.  However, we do
         * one small optimization here and in atomic_asm.S, which is
         * to fake out acquiring the actual lock in the atomic_lock table.
         */
        movei  r21, 0
#endif
        /* Issue the slow SPR read here while the tns result is in flight. */
        mfspr  r28, SPR_EX_CONTEXT_K_0

        {
         addi   r28, r28, 8     /* return to the instruction after the swint1 */
         bzt    r21, .Ldo_cmpxchg\bitwidth
        }

        /*
         * The preceding instruction is the last thing that must be
         * hot in the icache before we do the "tns" above.
         */
#ifdef CONFIG_SMP
        /*
         * We failed to acquire the tns lock on our first try.  Now use
         * bounded exponential backoff to retry, like __atomic_spinlock().
         */
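        /*
         * In C, the retry loop below is approximately the following
         * (cycle_low() and tns() stand in for the CYCLE_LOW SPR read
         * and the tns instruction; the names are illustrative):
         *
         *	unsigned int backoff = 32;
         *	do {
         *		unsigned int start = cycle_low();
         *		while (cycle_low() - start < backoff)
         *			;
         *		backoff *= 2;
         *		if (backoff > 2048)
         *			backoff = 2048;
         *	} while (tns(lock) != 0);
         */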
        {
         moveli r23, 2048       /* maximum backoff time in cycles */
         moveli r25, 32         /* starting backoff time in cycles */
        }
1:      mfspr  r26, CYCLE_LOW   /* get start point for this backoff */
2:      mfspr  r22, CYCLE_LOW   /* test to see if we've backed off enough */
        sub    r22, r22, r26
        slt    r22, r22, r25
        bbst   r22, 2b
        {
         shli   r25, r25, 1     /* double the backoff; retry the tns */

         tns    r21, ATOMIC_LOCK_REG_NAME
        }
        slt    r26, r23, r25    /* is the proposed backoff too big? */
        {
         mvnz   r25, r26, r23

         bzt    r21, .Ldo_cmpxchg\bitwidth
        }
        j      1b
#endif /* CONFIG_SMP */
        .endm
.Lcmpxchg32_tns:
        /*
         * This is the last instruction on the second cache line.
         * The nop here loads the second line, then we fall through
         * to the tns to load the third line before we take the lock.
         */
        nop
        cmpxchg_lock 32
        /*
         * This code is invoked from sys_cmpxchg after most of the
         * preconditions have been checked.  We still need to check
         * that r0 is 8-byte aligned, since if it's not we won't
         * actually be atomic.  However, ATOMIC_LOCK_REG has the atomic
         * lock pointer and r27/r28 have the saved SP/PC.
         * r23 is holding "r0 & 7" so we can test for alignment.
         * The compare value is in r2/r3; the new value is in r4/r5.
         * On return, we must put the old value in r0/r1.
         */
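        /*
         * In C terms, the 64-bit path is roughly this sketch, where
         * "p" is the (u32 *) user address from r0:
         *
         *	u32 old_lo = p[0], old_hi = p[1];
         *	if (old_lo == r2 && old_hi == r3) {
         *		p[0] = r4;
         *		p[1] = r5;
         *	}
         *	r0 = old_lo;  r1 = old_hi;
         */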
        .align 64
.Lcmpxchg64:
        {
         bzt    r23, .Lcmpxchg64_tns
        }
        j      .Lcmpxchg_badaddr

.Ldo_cmpxchg64:
        {
         lw     r21, r0
         addi   r25, r0, 4
        }
        {
         lw     r1, r25
        }
        seq    r26, r21, r2
        {
         bz     r26, .Lcmpxchg64_mismatch
         seq    r26, r1, r3
        }
        {
         bz     r26, .Lcmpxchg64_mismatch
        }
        sw     r0, r4
        sw     r25, r5
        /*
         * The 32-bit path provides optimized "match" and "mismatch"
         * iret paths, but we don't have enough bundles in this cache line
         * to do that, so we just make even the "mismatch" path do an "mf".
         */
.Lcmpxchg64_mismatch:
        {
         move   sp, r27
         mtspr  SPR_EX_CONTEXT_K_0, r28
        }
        mf
        {
         move   r0, r21
         sw     ATOMIC_LOCK_REG_NAME, zero
        }
        iret
.Lcmpxchg64_tns:
        cmpxchg_lock 64
        /*
         * Reset sp and revector to sys_cmpxchg_badaddr(), which will
         * just raise the appropriate signal and exit.  Doing it this
         * way means we don't have to duplicate the code in intvec.S's
         * int_hand macro that locates the top of the stack.
         */
.Lcmpxchg_badaddr:
        {
         moveli TREG_SYSCALL_NR_NAME, __NR_cmpxchg_badaddr
         move   sp, r27
        }
        j      intvec_SWINT_1

        ENDPROC(sys_cmpxchg)
        ENTRY(__sys_cmpxchg_end)
/* The single-step support may need to read all the registers. */
int_unalign:
        push_extra_callee_saves r0
        j      do_trap
/* Include .intrpt array of interrupt vectors */
        .section ".intrpt", "ax"

#ifndef CONFIG_USE_PMC
#define handle_perf_interrupt bad_intr
#endif

#ifndef CONFIG_HARDWALL
#define do_hardwall_trap bad_intr
#endif
        int_hand     INT_ITLB_MISS, ITLB_MISS, \
                     do_page_fault, handle_interrupt_no_single_step
        int_hand     INT_MEM_ERROR, MEM_ERROR, bad_intr
        int_hand     INT_ILL, ILL, do_trap, handle_ill
        int_hand     INT_GPV, GPV, do_trap
        int_hand     INT_SN_ACCESS, SN_ACCESS, do_trap
        int_hand     INT_IDN_ACCESS, IDN_ACCESS, do_trap
        int_hand     INT_UDN_ACCESS, UDN_ACCESS, do_trap
        int_hand     INT_IDN_REFILL, IDN_REFILL, bad_intr
        int_hand     INT_UDN_REFILL, UDN_REFILL, bad_intr
        int_hand     INT_IDN_COMPLETE, IDN_COMPLETE, bad_intr
        int_hand     INT_UDN_COMPLETE, UDN_COMPLETE, bad_intr
        int_hand     INT_SWINT_3, SWINT_3, do_trap
        int_hand     INT_SWINT_2, SWINT_2, do_trap
        int_hand     INT_SWINT_1, SWINT_1, SYSCALL, handle_syscall
        int_hand     INT_SWINT_0, SWINT_0, do_trap
        int_hand     INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
        int_hand     INT_DTLB_MISS, DTLB_MISS, do_page_fault
        int_hand     INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
        int_hand     INT_DMATLB_MISS, DMATLB_MISS, do_page_fault
        int_hand     INT_DMATLB_ACCESS, DMATLB_ACCESS, do_page_fault
        int_hand     INT_SNITLB_MISS, SNITLB_MISS, do_page_fault
        int_hand     INT_SN_NOTIFY, SN_NOTIFY, bad_intr
        int_hand     INT_SN_FIREWALL, SN_FIREWALL, do_hardwall_trap
        int_hand     INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr
        int_hand     INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
        int_hand     INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
        int_hand     INT_IDN_TIMER, IDN_TIMER, bad_intr
        int_hand     INT_UDN_TIMER, UDN_TIMER, bad_intr
        int_hand     INT_DMA_NOTIFY, DMA_NOTIFY, bad_intr
        int_hand     INT_IDN_CA, IDN_CA, bad_intr
        int_hand     INT_UDN_CA, UDN_CA, bad_intr
        int_hand     INT_IDN_AVAIL, IDN_AVAIL, bad_intr
        int_hand     INT_UDN_AVAIL, UDN_AVAIL, bad_intr
        int_hand     INT_PERF_COUNT, PERF_COUNT, \
                     handle_perf_interrupt, handle_nmi
        int_hand     INT_INTCTRL_3, INTCTRL_3, bad_intr
#if CONFIG_KERNEL_PL == 2
        dc_dispatch  INT_INTCTRL_2, INTCTRL_2
        int_hand     INT_INTCTRL_1, INTCTRL_1, bad_intr
#else
        int_hand     INT_INTCTRL_2, INTCTRL_2, bad_intr
        dc_dispatch  INT_INTCTRL_1, INTCTRL_1
#endif
        int_hand     INT_INTCTRL_0, INTCTRL_0, bad_intr
        int_hand     INT_MESSAGE_RCV_DWNCL, MESSAGE_RCV_DWNCL, \
                     hv_message_intr
        int_hand     INT_DEV_INTR_DWNCL, DEV_INTR_DWNCL, \
                     tile_dev_intr
        int_hand     INT_I_ASID, I_ASID, bad_intr
        int_hand     INT_D_ASID, D_ASID, bad_intr
        int_hand     INT_DMATLB_MISS_DWNCL, DMATLB_MISS_DWNCL, \
                     do_page_fault
        int_hand     INT_SNITLB_MISS_DWNCL, SNITLB_MISS_DWNCL, \
                     do_page_fault
        int_hand     INT_DMATLB_ACCESS_DWNCL, DMATLB_ACCESS_DWNCL, \
                     do_page_fault
        int_hand     INT_SN_CPL, SN_CPL, bad_intr
        int_hand     INT_DOUBLE_FAULT, DOUBLE_FAULT, do_trap
        int_hand     INT_AUX_PERF_COUNT, AUX_PERF_COUNT, \
                     handle_perf_interrupt, handle_nmi

        /* Synthetic interrupt delivered only by the simulator */
        int_hand     INT_BREAKPOINT, BREAKPOINT, do_breakpoint