/*
 * ultra.S: Don't expand these all over the place...
 *
 * Copyright (C) 1997, 2000, 2008 David S. Miller (davem@davemloft.net)
 */

#include <asm/asi.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/spitfire.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <asm/pil.h>
#include <asm/head.h>
#include <asm/thread_info.h>
#include <asm/cacheflush.h>
#include <asm/hypervisor.h>
#include <asm/cpudata.h>
	/* Basically, most of the Spitfire vs. Cheetah madness
	 * has to do with the fact that Cheetah does not support
	 * IMMU flushes out of the secondary context.  Someone needs
	 * to throw a South Lake Tahoe party for the folks at Sun
	 * Microelectronics who refused to fix this shit.
	 */

	/* This file is meant to be read efficiently by the CPU, not humans.
	 * Try not to fuck it up for anyone...
	 */
	.text
	.align		32
	.globl		__flush_tlb_mm
__flush_tlb_mm:		/* 18 insns */
	/* %o0=(ctx & TAG_CONTEXT_BITS), %o1=SECONDARY_CONTEXT */
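	/* Fast path: if the secondary context register already holds
	 * the context being flushed, one demap-context operation clears
	 * both TLBs (0x50 = demap-context | secondary-context in the
	 * demap address encoding).  Otherwise take the slow path below,
	 * which must borrow the secondary context register.
	 */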
	ldxa		[%o1] ASI_DMMU, %g2
	cmp		%g2, %o0
	bne,pn		%icc, __spitfire_flush_tlb_mm_slow
	 mov		0x50, %g3
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	sethi		%hi(KERNBASE), %g3
	flush		%g3
	retl
	 nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	.align		32
	.globl		__flush_tlb_page
__flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
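	/* Bit 0 of %o1 flags "flush the I-TLB as well"; the demap target
	 * is the page address with 0x10 or'ed in (demap-page |
	 * secondary-context).  Interrupts stay disabled while the
	 * secondary context register temporarily holds %o0.
	 */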
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	stxa		%o0, [%o4] ASI_DMMU
	andcc		%o1, 1, %g0
	andn		%o1, 1, %o3
	be,pn		%icc, 1f
	 or		%o3, 0x10, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate
	nop
	nop
	nop
	nop
	.align		32
	.globl		__flush_tlb_pending
__flush_tlb_pending:	/* 26 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
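	/* Walk the vaddrs[] array of pending flushes backwards (%o1 is
	 * scaled to a byte offset, 8 bytes per entry) and demap each
	 * page exactly as __flush_tlb_page does.
	 */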
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, %pstate
	mov		SECONDARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0
	andn		%o3, 1, %o3
	be,pn		%icc, 2f
	 or		%o3, 0x10, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	retl
	 wrpr		%g7, 0x0, %pstate
	nop
	nop
	nop
	nop
	.align		32
	.globl		__flush_tlb_kernel_range
__flush_tlb_kernel_range:	/* 16 insns */
	/* %o0=start, %o1=end */
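	/* Demap every page in [start, end) from the nucleus context
	 * (0x20 = demap-page | nucleus).  %o3 counts down from
	 * (end - start - PAGE_SIZE) to zero, so the loop runs from the
	 * last page toward the first.
	 */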
	cmp		%o0, %o1
	be,pn		%xcc, 2f
	 sethi		%hi(PAGE_SIZE), %o4
	sub		%o1, %o0, %o3
	sub		%o3, %o4, %o3
	or		%o0, 0x20, %o0		! Nucleus
1:	stxa		%g0, [%o0 + %o3] ASI_DMMU_DEMAP
	stxa		%g0, [%o0 + %o3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%o3, 1b
	 sub		%o3, %o4, %o3
2:	sethi		%hi(KERNBASE), %o3
	flush		%o3
	retl
	 nop
	nop
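	/* Slow path for __flush_tlb_mm: the secondary context register
	 * holds some other context, so install %o0 with interrupts
	 * disabled, demap both TLBs, then restore the previous value
	 * (still live in %g2 from the fast path's ldxa).
	 */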
__spitfire_flush_tlb_mm_slow:
	rdpr		%pstate, %g1
	wrpr		%g1, PSTATE_IE, %pstate
	stxa		%o0, [%o1] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	flush		%g6
	stxa		%g2, [%o1] ASI_DMMU
	sethi		%hi(KERNBASE), %o1
	flush		%o1
	retl
	 wrpr		%g1, 0, %pstate
/*
 * The following code flushes one page_size worth.
 */
	.section .kprobes.text, "ax"
	.align		32
	.globl		__flush_icache_page
__flush_icache_page:	/* %o0 = phys_page */
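	/* Translate the physical address back into the kernel's linear
	 * mapping, then issue a flush for each 32-byte I-cache line in
	 * the page, from the top down.
	 */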
	srlx		%o0, PAGE_SHIFT, %o0
	sethi		%hi(PAGE_OFFSET), %g1
	sllx		%o0, PAGE_SHIFT, %o0
	sethi		%hi(PAGE_SIZE), %g2
	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
	add		%o0, %g1, %o0
1:	subcc		%g2, 32, %g2
	bne,pt		%icc, 1b
	 flush		%o0 + %g2
	retl
	 nop
#ifdef DCACHE_ALIASING_POSSIBLE
#if (PAGE_SHIFT != 13)
#error only page shift of 13 is supported by dcache flush
#endif
#define DTAG_MASK 0x3

	/* This routine is Spitfire specific so the hardcoded
	 * D-cache size and line-size are OK.
	 */
	.align		64
	.globl		__flush_dcache_page
__flush_dcache_page:	/* %o0=kaddr, %o1=flush_icache */
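	/* Walk the 16K direct-mapped D-cache from the top, comparing
	 * each line's tag against this page's physical address (shifted
	 * right by 11 to match the tag format) and invalidating the tag
	 * on a match.
	 */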
	sethi		%hi(PAGE_OFFSET), %g1
	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
	sub		%o0, %g1, %o0			! physical address
	srlx		%o0, 11, %o0			! make D-cache TAG
	sethi		%hi(1 << 14), %o2		! D-cache size
	sub		%o2, (1 << 5), %o2		! D-cache line size
1:	ldxa		[%o2] ASI_DCACHE_TAG, %o3	! load D-cache TAG
	andcc		%o3, DTAG_MASK, %g0		! Valid?
	be,pn		%xcc, 2f			! Nope, branch
	 andn		%o3, DTAG_MASK, %o3		! Clear valid bits
	cmp		%o3, %o0			! TAG match?
	bne,pt		%xcc, 2f			! Nope, branch
	 nop
	stxa		%g0, [%o2] ASI_DCACHE_TAG	! Invalidate TAG
	membar		#Sync
2:	brnz,pt		%o2, 1b
	 sub		%o2, (1 << 5), %o2		! D-cache line size

	/* The I-cache does not snoop local stores so we
	 * better flush that too when necessary.
	 */
	brnz,pt		%o1, __flush_icache_page
	 sllx		%o0, 11, %o0
	retl
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */
	.previous
	/* Cheetah specific versions, patched at boot time. */
__cheetah_flush_tlb_mm:	/* 19 insns */
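	/* Cheetah cannot demap the I-TLB from the secondary context
	 * (see the comment at the top of this file), so borrow
	 * PRIMARY_CONTEXT instead, preserving the nucleus page-size
	 * fields, and run at TL=1 so memory accesses translate via the
	 * nucleus context while PRIMARY_CONTEXT is switched.
	 * 0x40 = demap-context | primary-context.
	 */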
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o2
	mov		0x40, %g3
	ldxa		[%o2] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o1
	sllx		%o1, CTX_PGSZ1_NUC_SHIFT, %o1
	or		%o0, %o1, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o2] ASI_DMMU
	stxa		%g0, [%g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g3] ASI_IMMU_DEMAP
	stxa		%g2, [%o2] ASI_DMMU
	sethi		%hi(KERNBASE), %o2
	flush		%o2
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate
__cheetah_flush_tlb_page:	/* 22 insns */
	/* %o0 = context, %o1 = vaddr */
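	/* Same flow as __flush_tlb_page above, but through
	 * PRIMARY_CONTEXT at TL=1 for the reasons given in
	 * __cheetah_flush_tlb_mm.
	 */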
	rdpr		%pstate, %g7
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o4] ASI_DMMU
	andcc		%o1, 1, %g0
	be,pn		%icc, 1f
	 andn		%o1, 1, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
1:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate
__cheetah_flush_tlb_pending:	/* 27 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	rdpr		%pstate, %g7
	sllx		%o1, 3, %o1
	andn		%g7, PSTATE_IE, %g2
	wrpr		%g2, 0x0, %pstate
	wrpr		%g0, 1, %tl
	mov		PRIMARY_CONTEXT, %o4
	ldxa		[%o4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %o3
	sllx		%o3, CTX_PGSZ1_NUC_SHIFT, %o3
	or		%o0, %o3, %o0	/* Preserve nucleus page size fields */
	stxa		%o0, [%o4] ASI_DMMU
1:	sub		%o1, (1 << 3), %o1
	ldx		[%o2 + %o1], %o3
	andcc		%o3, 1, %g0
	be,pn		%icc, 2f
	 andn		%o3, 1, %o3
	stxa		%g0, [%o3] ASI_IMMU_DEMAP
2:	stxa		%g0, [%o3] ASI_DMMU_DEMAP
	membar		#Sync
	brnz,pt		%o1, 1b
	 nop
	stxa		%g2, [%o4] ASI_DMMU
	sethi		%hi(KERNBASE), %o4
	flush		%o4
	wrpr		%g0, 0, %tl
	retl
	 wrpr		%g7, 0x0, %pstate
#ifdef DCACHE_ALIASING_POSSIBLE
__cheetah_flush_dcache_page:	/* 11 insns */
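	/* Cheetah can displacement-flush by physical address: a store
	 * through ASI_DCACHE_INVALIDATE kills the whole line, so no
	 * tag-compare loop is needed here.
	 */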
	sethi		%hi(PAGE_OFFSET), %g1
	ldx		[%g1 + %lo(PAGE_OFFSET)], %g1
	sub		%o0, %g1, %o0
	sethi		%hi(PAGE_SIZE), %o4
1:	subcc		%o4, (1 << 5), %o4
	stxa		%g0, [%o0 + %o4] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retl		/* I-cache flush never needed on Cheetah, see callers. */
	 nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	/* Hypervisor specific versions, patched at boot time. */
__hypervisor_tlb_tl0_error:
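	/* Report a failed TLB hypercall: on entry %o0 carries the error
	 * status and %o1 the hypervisor call number (set up in the
	 * caller's branch delay slot); they are handed to
	 * hypervisor_tlbop_error() from the new register window.
	 */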
	save		%sp, -192, %sp
	mov		%i0, %o0
	call		hypervisor_tlbop_error
	 mov		%i1, %o1
	ret
	 restore

__hypervisor_flush_tlb_mm:	/* 10 insns */
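	/* sun4v fast-trap convention: the function number goes in %o5,
	 * arguments in the remaining %o registers, then "ta
	 * HV_FAST_TRAP"; the status comes back in %o0, zero meaning
	 * success.
	 */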
	mov		%o0, %o2	/* ARG2: mmu context */
	mov		0, %o0		/* ARG0: CPU lists unimplemented */
	mov		0, %o1		/* ARG1: CPU lists unimplemented */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_FAST_MMU_DEMAP_CTX, %o1
	retl
	 nop

__hypervisor_flush_tlb_page:	/* 11 insns */
	/* %o0 = context, %o1 = vaddr */
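	/* Bit 0 of the vaddr is the "also flush I-TLB" flag used by the
	 * sun4u versions; the srlx/sllx pair below strips it, since
	 * HV_MMU_ALL unmaps from both MMUs anyway.
	 */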
	mov		%o0, %g2
	mov		%o1, %o0	/* ARG0: vaddr + IMMU-bit */
	mov		%g2, %o1	/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	retl
	 nop

__hypervisor_flush_tlb_pending: /* 16 insns */
	/* %o0 = context, %o1 = nr, %o2 = vaddrs[] */
	sllx		%o1, 3, %g1
	mov		%o2, %g2
	mov		%o0, %g3
1:	sub		%g1, (1 << 3), %g1
	ldx		[%g2 + %g1], %o0	/* ARG0: vaddr + IMMU-bit */
	mov		%g3, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2		/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt		%g1, 1b
	 nop
	retl
	 nop

__hypervisor_flush_tlb_kernel_range: /* 16 insns */
	/* %o0=start, %o1=end */
	cmp		%o0, %o1
	be,pn		%xcc, 2f
	 sethi		%hi(PAGE_SIZE), %g3
	mov		%o0, %g1
	sub		%o1, %g1, %g2
	sub		%g2, %g3, %g2
1:	add		%g1, %g2, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	brnz,pn		%o0, __hypervisor_tlb_tl0_error
	 mov		HV_MMU_UNMAP_ADDR_TRAP, %o1
	brnz,pt		%g2, 1b
	 sub		%g2, %g3, %g2
2:	retl
	 nop
#ifdef DCACHE_ALIASING_POSSIBLE
	/* XXX Niagara and friends have an 8K cache, so no aliasing is
	 * XXX possible, but nothing explicit in the Hypervisor API
	 * XXX guarantees this.
	 */
__hypervisor_flush_dcache_page:	/* 2 insns */
	retl
	 nop
#endif

tlb_patch_one:
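	/* %o0 = dest insns, %o1 = src insns, %o2 = insn count.
	 * Copy %o2 instruction words from src to dest, flushing each
	 * dest word from the I-cache as it is stored.
	 */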
1:	lduw		[%o1], %g1
	stw		%g1, [%o0]
	flush		%o0
	subcc		%o2, 1, %o2
	add		%o1, 4, %o1
	bne,pt		%icc, 1b
	 add		%o0, 4, %o0
	retl
	 nop

	.globl		cheetah_patch_cachetlbops
cheetah_patch_cachetlbops:
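	/* Overwrite the generic (Spitfire) routines with the Cheetah
	 * variants.  The instruction counts passed to tlb_patch_one
	 * must match the "insns" annotations on the routines above.
	 */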
	save		%sp, -128, %sp
	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__cheetah_flush_tlb_mm), %o1
	or		%o1, %lo(__cheetah_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		19, %o2

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__cheetah_flush_tlb_page), %o1
	or		%o1, %lo(__cheetah_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		22, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__cheetah_flush_tlb_pending), %o1
	or		%o1, %lo(__cheetah_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		27, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__cheetah_flush_dcache_page), %o1
	or		%o1, %lo(__cheetah_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		11, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

	ret
	 restore
#ifdef CONFIG_SMP
	/* These are all called by the slaves of a cross call, at
	 * trap level 1, with interrupts fully disabled.
	 *
	 * Register usage:
	 *   %g5	mm->context	(all tlb flushes)
	 *   %g1	address arg 1	(tlb page and range flushes)
	 *   %g7	address arg 2	(tlb range flush only)
	 *
	 *   %g6	scratch 1
	 *   %g2	scratch 2
	 *   %g3	scratch 3
	 *   %g4	scratch 4
	 */
	.align		32
	.globl		xcall_flush_tlb_mm
xcall_flush_tlb_mm:	/* 21 insns */
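	/* Slave side of an SMP cross-call: demap the whole context
	 * named in %g5 via PRIMARY_CONTEXT, then "retry" to resume the
	 * interrupted instruction stream.
	 */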
	mov		PRIMARY_CONTEXT, %g2
	ldxa		[%g2] ASI_DMMU, %g3
	srlx		%g3, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5	/* Preserve nucleus page size fields */
	stxa		%g5, [%g2] ASI_DMMU
	mov		0x40, %g4
	stxa		%g0, [%g4] ASI_DMMU_DEMAP
	stxa		%g0, [%g4] ASI_IMMU_DEMAP
	stxa		%g3, [%g2] ASI_DMMU
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	.globl		xcall_flush_tlb_page
xcall_flush_tlb_page:	/* 17 insns */
	/* %g5=context, %g1=vaddr */
	mov		PRIMARY_CONTEXT, %g4
	ldxa		[%g4] ASI_DMMU, %g2
	srlx		%g2, CTX_PGSZ1_NUC_SHIFT, %g4
	sllx		%g4, CTX_PGSZ1_NUC_SHIFT, %g4
	or		%g5, %g4, %g5
	mov		PRIMARY_CONTEXT, %g4
	stxa		%g5, [%g4] ASI_DMMU
	andcc		%g1, 0x1, %g0
	be,pn		%icc, 2f
	 andn		%g1, 0x1, %g5
	stxa		%g0, [%g5] ASI_IMMU_DEMAP
2:	stxa		%g0, [%g5] ASI_DMMU_DEMAP
	membar		#Sync
	stxa		%g2, [%g4] ASI_DMMU
	retry
	nop
	nop

	.globl		xcall_flush_tlb_kernel_range
xcall_flush_tlb_kernel_range:	/* 25 insns */
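	/* %g1 = start, %g7 = end.  Round both to page boundaries, then
	 * step %g3 down from (end - start - PAGE_SIZE) to zero,
	 * demapping each page from the nucleus context.
	 */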
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1
	andn		%g7, %g2, %g7
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2
	sub		%g3, %g2, %g3
	or		%g1, 0x20, %g1		! Nucleus
1:	stxa		%g0, [%g1 + %g3] ASI_DMMU_DEMAP
	stxa		%g0, [%g1 + %g3] ASI_IMMU_DEMAP
	membar		#Sync
	brnz,pt		%g3, 1b
	 sub		%g3, %g2, %g3
	retry
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop
	nop

	/* This runs in a very controlled environment, so we do
	 * not need to worry about BH races etc.
	 */
	.globl		xcall_sync_tick
xcall_sync_tick:

661:	rdpr		%pstate, %g2
	wrpr		%g2, PSTATE_IG | PSTATE_AG, %pstate
	.section	.sun4v_2insn_patch, "ax"
	.word		661b
	nop
	nop
	.previous
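	/* The rdpr/wrpr pair at 661: above is replaced at boot by the
	 * two nops recorded in the .sun4v_2insn_patch section when
	 * running on sun4v, where the AG/IG global-set bits of %pstate
	 * do not exist (sun4v selects global sets with %gl).
	 */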
	rdpr		%pil, %g2
	wrpr		%g0, PIL_NORMAL_MAX, %pil
	sethi		%hi(109f), %g7
	b,pt		%xcc, etrap_irq
109:	 or		%g7, %lo(109b), %g7
#ifdef CONFIG_TRACE_IRQFLAGS
	call		trace_hardirqs_off
	 nop
#endif
	call		smp_synchronize_tick_client
	 nop
	b		rtrap_xcall
	 ldx		[%sp + PTREGS_OFF + PT_V9_TSTATE], %l1

	.globl		xcall_fetch_glob_regs
xcall_fetch_glob_regs:
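	/* Snapshot this CPU's trap state (%tstate/%tpc/%tnpc, %o7, %i7,
	 * the previous register window's %i7, and the current thread
	 * pointer) into its global_cpu_snapshot[] slot; each per-cpu
	 * slot is 64 bytes, hence the shift by 6.
	 */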
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3
	add		%g1, %g3, %g1
	rdpr		%tstate, %g7
	stx		%g7, [%g1 + GR_SNAP_TSTATE]
	rdpr		%tpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TPC]
	rdpr		%tnpc, %g7
	stx		%g7, [%g1 + GR_SNAP_TNPC]
	stx		%o7, [%g1 + GR_SNAP_O7]
	stx		%i7, [%g1 + GR_SNAP_I7]
	/* Don't try this at home kids... */
	rdpr		%cwp, %g3
	sub		%g3, 1, %g7
	wrpr		%g7, %cwp
	mov		%i7, %g7
	wrpr		%g3, %cwp
	stx		%g7, [%g1 + GR_SNAP_RPC]
	sethi		%hi(trap_block), %g7
	or		%g7, %lo(trap_block), %g7
	sllx		%g2, TRAP_BLOCK_SZ_SHIFT, %g2
	add		%g7, %g2, %g7
	ldx		[%g7 + TRAP_PER_CPU_THREAD], %g3
	stx		%g3, [%g1 + GR_SNAP_THREAD]
	retry

	.globl		xcall_fetch_glob_pmu
xcall_fetch_glob_pmu:
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3
	add		%g1, %g3, %g1
	rd		%pic, %g7
	stx		%g7, [%g1 + (4 * 8)]
	rd		%pcr, %g7
	stx		%g7, [%g1 + (0 * 8)]
	retry

	.globl		xcall_fetch_glob_pmu_n4
xcall_fetch_glob_pmu_n4:
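	/* Niagara-4 style PMU snapshot: the four PIC counters are read
	 * directly through ASI_PIC at offsets 0x0/0x8/0x10/0x18, while
	 * the four PCRs must be fetched via the HV_FAST_VT_GET_PERFREG
	 * hypervisor call (%o0 = register index, value returned in %o1).
	 */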
	sethi		%hi(global_cpu_snapshot), %g1
	or		%g1, %lo(global_cpu_snapshot), %g1
	__GET_CPUID(%g2)
	sllx		%g2, 6, %g3
	add		%g1, %g3, %g1

	ldxa		[%g0] ASI_PIC, %g7
	stx		%g7, [%g1 + (4 * 8)]
	mov		0x08, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (5 * 8)]
	mov		0x10, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (6 * 8)]
	mov		0x18, %g3
	ldxa		[%g3] ASI_PIC, %g7
	stx		%g7, [%g1 + (7 * 8)]

	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o5, %g7

	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		3, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (3 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		2, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (2 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		1, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (1 * 8)]
	mov		HV_FAST_VT_GET_PERFREG, %o5
	mov		0, %o0
	ta		HV_FAST_TRAP
	stx		%o1, [%g1 + (0 * 8)]

	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g7, %o5

	retry

#ifdef DCACHE_ALIASING_POSSIBLE
	.align		32
	.globl		xcall_flush_dcache_page_cheetah
xcall_flush_dcache_page_cheetah: /* %g1 == physical page address */
	sethi		%hi(PAGE_SIZE), %g3
1:	subcc		%g3, (1 << 5), %g3
	stxa		%g0, [%g1 + %g3] ASI_DCACHE_INVALIDATE
	membar		#Sync
	bne,pt		%icc, 1b
	 nop
	retry
	nop
#endif /* DCACHE_ALIASING_POSSIBLE */

	.globl		xcall_flush_dcache_page_spitfire
xcall_flush_dcache_page_spitfire: /* %g1 == physical page address
				     %g7 == kernel page virtual address
				     %g5 == (page->mapping != NULL)  */
#ifdef DCACHE_ALIASING_POSSIBLE
	srlx		%g1, (13 - 2), %g1	! Form tag comparator
	sethi		%hi(L1DCACHE_SIZE), %g3	! D$ size == 16K
	sub		%g3, (1 << 5), %g3	! D$ linesize == 32
1:	ldxa		[%g3] ASI_DCACHE_TAG, %g2
	andcc		%g2, 0x3, %g0
	be,pn		%xcc, 2f
	 andn		%g2, 0x3, %g2
	cmp		%g2, %g1
	bne,pt		%xcc, 2f
	 nop
	stxa		%g0, [%g3] ASI_DCACHE_TAG
	membar		#Sync
2:	cmp		%g3, 0
	bne,pt		%xcc, 1b
	 sub		%g3, (1 << 5), %g3
	brz,pn		%g5, 2f
#endif /* DCACHE_ALIASING_POSSIBLE */
	 sethi		%hi(PAGE_SIZE), %g3
1:	flush		%g7
	subcc		%g3, (1 << 5), %g3
	bne,pt		%icc, 1b
	 add		%g7, (1 << 5), %g7
2:	retry
	nop
	nop
	/* %g5:	error
	 * %g6:	tlb op
	 */
__hypervisor_tlb_xcall_error:
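	/* Cross-call flavor of the TL0 error path: etrap makes the
	 * pre-trap %g4/%g5 available in %l4/%l5, so the error status
	 * and the tlb op number are moved there first, then reported
	 * from TL=0.
	 */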
	mov	%g5, %g4
	mov	%g6, %g5
	ba,pt	%xcc, etrap
	 rd	%pc, %g7
	mov	%l4, %o0
	call	hypervisor_tlbop_error_xcall
	 mov	%l5, %o1
	ba,a,pt	%xcc, rtrap

	.globl		__hypervisor_xcall_flush_tlb_mm
__hypervisor_xcall_flush_tlb_mm: /* 21 insns */
	/* %g5=ctx, g1,g2,g3,g4,g7=scratch, %g6=unusable */
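	/* No stack is usable at cross-call time, so the interrupted
	 * context's %o registers are stashed in the scratch globals
	 * around the hypercall and restored before the retry.
	 */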
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%o3, %g1
	mov		%o5, %g7
	clr		%o0		/* ARG0: CPU lists unimplemented */
	clr		%o1		/* ARG1: CPU lists unimplemented */
	mov		%g5, %o2	/* ARG2: mmu context */
	mov		HV_MMU_ALL, %o3	/* ARG3: flags */
	mov		HV_FAST_MMU_DEMAP_CTX, %o5
	ta		HV_FAST_TRAP
	mov		HV_FAST_MMU_DEMAP_CTX, %g6
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5
	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g4, %o2
	mov		%g1, %o3
	mov		%g7, %o5
	membar		#Sync
	retry

	.globl		__hypervisor_xcall_flush_tlb_page
__hypervisor_xcall_flush_tlb_page: /* 17 insns */
	/* %g5=ctx, %g1=vaddr */
	mov		%o0, %g2
	mov		%o1, %g3
	mov		%o2, %g4
	mov		%g1, %o0	/* ARG0: virtual address */
	mov		%g5, %o1	/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	srlx		%o0, PAGE_SHIFT, %o0
	sllx		%o0, PAGE_SHIFT, %o0
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,a,pn	%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5
	mov		%g2, %o0
	mov		%g3, %o1
	mov		%g4, %o2
	membar		#Sync
	retry

	.globl		__hypervisor_xcall_flush_tlb_kernel_range
__hypervisor_xcall_flush_tlb_kernel_range: /* 25 insns */
	/* %g1=start, %g7=end, g2,g3,g4,g5,g6=scratch */
	sethi		%hi(PAGE_SIZE - 1), %g2
	or		%g2, %lo(PAGE_SIZE - 1), %g2
	andn		%g1, %g2, %g1
	andn		%g7, %g2, %g7
	sub		%g7, %g1, %g3
	add		%g2, 1, %g2
	sub		%g3, %g2, %g3
	mov		%o0, %g2
	mov		%o1, %g4
	mov		%o2, %g7
1:	add		%g1, %g3, %o0	/* ARG0: virtual address */
	mov		0, %o1		/* ARG1: mmu context */
	mov		HV_MMU_ALL, %o2	/* ARG2: flags */
	ta		HV_MMU_UNMAP_ADDR_TRAP
	mov		HV_MMU_UNMAP_ADDR_TRAP, %g6
	brnz,pn		%o0, __hypervisor_tlb_xcall_error
	 mov		%o0, %g5
	sethi		%hi(PAGE_SIZE), %o2
	brnz,pt		%g3, 1b
	 sub		%g3, %o2, %g3
	mov		%g2, %o0
	mov		%g4, %o1
	mov		%g7, %o2
	membar		#Sync
	retry

	/* These just get rescheduled to PIL vectors. */
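	/* Each of these simply raises the corresponding software
	 * interrupt and exits with retry; the real handler runs later
	 * at that PIL from the normal interrupt path.
	 */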
	.globl		xcall_call_function
xcall_call_function:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC), %set_softint
	retry

	.globl		xcall_call_function_single
xcall_call_function_single:
	wr		%g0, (1 << PIL_SMP_CALL_FUNC_SNGL), %set_softint
	retry

	.globl		xcall_receive_signal
xcall_receive_signal:
	wr		%g0, (1 << PIL_SMP_RECEIVE_SIGNAL), %set_softint
	retry

	.globl		xcall_capture
xcall_capture:
	wr		%g0, (1 << PIL_SMP_CAPTURE), %set_softint
	retry

	.globl		xcall_new_mmu_context_version
xcall_new_mmu_context_version:
	wr		%g0, (1 << PIL_SMP_CTX_NEW_VERSION), %set_softint
	retry

#ifdef CONFIG_KGDB
	.globl		xcall_kgdb_capture
xcall_kgdb_capture:
	wr		%g0, (1 << PIL_KGDB_CAPTURE), %set_softint
	retry
#endif

#endif /* CONFIG_SMP */

	.globl		hypervisor_patch_cachetlbops
hypervisor_patch_cachetlbops:
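	/* Same idea as cheetah_patch_cachetlbops: redirect the generic
	 * routines (and, on SMP, the xcall slaves) to the hypervisor
	 * versions, with counts matching the annotated bodies above.
	 */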
	save		%sp, -128, %sp
	sethi		%hi(__flush_tlb_mm), %o0
	or		%o0, %lo(__flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		10, %o2

	sethi		%hi(__flush_tlb_page), %o0
	or		%o0, %lo(__flush_tlb_page), %o0
	sethi		%hi(__hypervisor_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		11, %o2

	sethi		%hi(__flush_tlb_pending), %o0
	or		%o0, %lo(__flush_tlb_pending), %o0
	sethi		%hi(__hypervisor_flush_tlb_pending), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_pending), %o1
	call		tlb_patch_one
	 mov		16, %o2

	sethi		%hi(__flush_tlb_kernel_range), %o0
	or		%o0, %lo(__flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		16, %o2

#ifdef DCACHE_ALIASING_POSSIBLE
	sethi		%hi(__flush_dcache_page), %o0
	or		%o0, %lo(__flush_dcache_page), %o0
	sethi		%hi(__hypervisor_flush_dcache_page), %o1
	or		%o1, %lo(__hypervisor_flush_dcache_page), %o1
	call		tlb_patch_one
	 mov		2, %o2
#endif /* DCACHE_ALIASING_POSSIBLE */

#ifdef CONFIG_SMP
	sethi		%hi(xcall_flush_tlb_mm), %o0
	or		%o0, %lo(xcall_flush_tlb_mm), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_mm), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_mm), %o1
	call		tlb_patch_one
	 mov		21, %o2

	sethi		%hi(xcall_flush_tlb_page), %o0
	or		%o0, %lo(xcall_flush_tlb_page), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_page), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1
	call		tlb_patch_one
	 mov		17, %o2

	sethi		%hi(xcall_flush_tlb_kernel_range), %o0
	or		%o0, %lo(xcall_flush_tlb_kernel_range), %o0
	sethi		%hi(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	or		%o1, %lo(__hypervisor_xcall_flush_tlb_kernel_range), %o1
	call		tlb_patch_one
	 mov		25, %o2
#endif /* CONFIG_SMP */

	ret
	 restore