/*-
 * Copyright (C) 2007-2009 Semihalf, Rafal Jaworowski <raj@semihalf.com>
 * Copyright (C) 2006 Semihalf, Marian Balakowicz <m8@semihalf.com>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
 * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
 * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#include "assym.inc"

#include "opt_hwpmc_hooks.h"

#include <machine/asm.h>
#include <machine/hid.h>
#include <machine/param.h>
#include <machine/spr.h>
#include <machine/pte.h>
#include <machine/trap.h>
#include <machine/vmparam.h>
#include <machine/tlb.h>

#ifdef _CALL_ELF
.abiversion _CALL_ELF
#endif
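
/*
 * _CALL_ELF is predefined by the compiler on 64-bit ELF targets (2 for
 * the ELFv2 ABI); passing it to .abiversion records the same ABI level
 * in the object file.
 */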
#define	TMPSTACKSZ	16384

#ifdef __powerpc64__
#define	GET_TOCBASE(r)	\
	mfspr	r, SPR_SPRG8
#define	TOC_RESTORE	nop
#define	CMPI	cmpdi
#define	CMPL	cmpld
#define	LOAD	ld
#define	LOADX	ldarx
#define	STORE	std
#define	STOREX	stdcx.
#define	STU	stdu
#define	CALLSIZE	48
#define	REDZONE	288
#define	THREAD_REG	%r13
#define	ADDR(x)	\
	.llong	x
#define	WORD_SIZE	8
#else
#define	GET_TOCBASE(r)
#define	TOC_RESTORE
#define	CMPI	cmpwi
#define	CMPL	cmplw
#define	LOAD	lwz
#define	LOADX	lwarx
#define	STOREX	stwcx.
#define	STORE	stw
#define	STU	stwu
#define	CALLSIZE	8
#define	REDZONE	0
#define	THREAD_REG	%r2
#define	ADDR(x)	\
	.long	x
#define	WORD_SIZE	4
#endif
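
/*
 * The macros above abstract the 32-bit/64-bit differences: the 64-bit
 * kernel uses doubleword loads/stores, keeps the TOC pointer in SPRG8
 * and the thread pointer in r13, and must respect the larger minimum
 * call frame and the ABI red zone, while the 32-bit kernel uses
 * word-sized operations and keeps the thread pointer in r2.
 */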
#ifdef __powerpc64__
	/* Placate lld by creating a kboot stub. */
	.section ".text.kboot", "x", @progbits
	b	__start
#endif

	.text
	.globl	btext
btext:

/*
 * This symbol is here for the benefit of kvm_mkdb, and is supposed to
 * mark the start of kernel text.
 */
	.globl	kernel_text
kernel_text:

/*
 * Startup entry. Note, this must be the first thing in the text segment!
 */
	.text
	.globl	__start
__start:
/*
 * Assumptions on the boot loader:
 *  - System memory starts from physical address 0
 *  - It's mapped by a single TLB1 entry
 *  - TLB1 mapping is 1:1 pa to va
 *  - Kernel is loaded at a 64MB boundary
 *  - All PID registers are set to the same value
 *  - CPU is running in AS=0
 *
 * Register contents provided by the loader(8):
 *	r1	: stack pointer
 *	r3	: metadata pointer
 *
 * We rearrange the TLB1 layout as follows:
 *  - Find the TLB1 entry we started in
 *  - Make sure it's protected, invalidate other entries
 *  - Create a temp entry in the second AS (make sure it's not TLB[1])
 *  - Switch to the temp mapping
 *  - Map 64MB of RAM in TLB1[1]
 *  - Use AS=0, set EPN to VM_MIN_KERNEL_ADDRESS and RPN to the kernel
 *    load address
 *  - Switch to the TLB1[1] mapping
 *  - Invalidate the temp mapping
 *
 * locore register use:
 *	r1	: stack pointer
 *	r2	: trace pointer (AP only, for early diagnostics)
 *	r3-r27	: scratch registers
 *	r28	: temp TLB1 entry
 *	r29	: initial TLB1 entry we started in
 *	r30-r31	: arguments (metadata pointer)
 */
/*
 * Keep arguments in r30 & r31 for later use.
 */
	mr	%r30, %r3
	mr	%r31, %r4

/*
 * Initial cleanup
 */
	li	%r3, PSL_DE	/* Keep debug exceptions for CodeWarrior. */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtmsr	%r3
	isync
/*
 * Initial HIDs configuration
 */
1:
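	/*
	 * The upper half of the PVR holds the processor version
	 * (e.g. e500v2, e500mc, e5500); it is extracted below and
	 * compared against the FSL_* core IDs to pick the matching
	 * HID0/HID1 defaults.
	 */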
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	/* Check for e500mc and e5500 */
	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f

	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f

	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l

3:
	mtspr	SPR_HID0, %r4
	isync

	/*
	 * The e500mc, e5500, and e6500 do not have the HID1 register,
	 * so skip HID1 setup on those cores.
	 */
	cmpli	0, 0, %r3, FSL_E500mc
	beq	1f
	cmpli	0, 0, %r3, FSL_E5500
	beq	1f
	cmpli	0, 0, %r3, FSL_E6500
	beq	1f

	lis	%r3, HID1_E500_DEFAULT_SET@h
	ori	%r3, %r3, HID1_E500_DEFAULT_SET@l
	mtspr	SPR_HID1, %r3
	isync
1:
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

	cmpwi	%r30, 0
	beq	done_mapping

/*
 * Locate the TLB1 entry that maps this code
 */
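	/*
	 * The bl/mflr pair sets LR to the address of the instruction
	 * following the bl, i.e. label 1 itself, yielding the current
	 * physical PC without relying on any relocated symbols.
	 */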
	bl	1f
1:	mflr	%r3
	bl	tlb1_find_current	/* the entry found is returned in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary mapping in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
	bl	2f
2:	mflr	%r4
	addi	%r4, %r4, (3f - 2b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
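	/*
	 * rfi loads the PC from SRR0 and the MSR from SRR1, so this one
	 * instruction both jumps to label 3 and sets MSR[IS|DS]=1,
	 * activating the temporary AS=1 translation.
	 */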
	rfi				/* Switch context */

/*
 * Invalidate initial entry
 */
3:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[1] and switch to it
 */
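	/*
	 * A Book-E TLB entry is written by filling the MAS registers --
	 * MAS0 (TLB select, entry index), MAS1 (valid, IPROT, TID, TS,
	 * size), MAS2 (EPN, WIMGE) and MAS3/MAS7 (RPN, permissions) --
	 * and then executing tlbwe.
	 */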
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync

	/* Discover phys load address */
	bl	3f
3:	mflr	%r4			/* Use current address */
	rlwinm	%r4, %r4, 0, 0, 5	/* 64MB alignment mask */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4		/* Set RPN and protection */
	isync
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	isync
	tlbwe
	isync
	msync
	/* Switch to the above TLB1[1] mapping */
	bl	4f
4:	mflr	%r4
#ifdef __powerpc64__
	clrldi	%r4, %r4, 38
	clrrdi	%r3, %r3, 12
#else
	rlwinm	%r4, %r4, 0, 6, 31	/* Current offset from kernel load address */
	rlwinm	%r3, %r3, 0, 0, 19
#endif
	add	%r4, %r4, %r3		/* Convert to kernel virtual address */
	addi	%r4, %r4, (5f - 4b)
	li	%r3, PSL_DE		/* Note AS=0 */
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h
#endif
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi

/*
 * Invalidate temp mapping
 */
5:
	mr	%r3, %r28
	bl	tlb1_inval_entry

done_mapping:
#ifdef __powerpc64__
	/* Set up the TOC pointer */
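	/*
	 * The .llong below holds the link-time offset from its own
	 * address to __tocbase + 0x8000; mflr after the bl yields its
	 * run-time address, so the following add produces the relocated
	 * TOC pointer regardless of where the kernel was loaded.
	 */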
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2
	nop

	/* Get load offset */
	ld	%r31,-0x8000(%r2)	/* First TOC entry is TOC base */
	subf	%r31,%r31,%r2		/* Subtract from real TOC base to get base */

	/* Set up the stack pointer */
	bl	1f
	.llong	tmpstack + TMPSTACKSZ - 96 - .
1:	mflr	%r3
	ld	%r1,0(%r3)
	add	%r1,%r1,%r3

/*
 * Relocate kernel
 */
	bl	1f
	.llong	_DYNAMIC-.
1:	mflr	%r3
	ld	%r4,0(%r3)
	add	%r3,%r4,%r3
	mr	%r4,%r31
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	addi	%r1, %r1, (TMPSTACKSZ - 16)

/*
 * Relocate kernel
 */
	bl	1f
	.long	_DYNAMIC-.
	.long	_GLOBAL_OFFSET_TABLE_-.
1:	mflr	%r5
	lwz	%r3,0(%r5)	/* _DYNAMIC in %r3 */
	add	%r3,%r3,%r5
	lwz	%r4,4(%r5)	/* GOT pointer */
	add	%r4,%r4,%r5
	lwz	%r4,4(%r4)	/* got[0] is _DYNAMIC link addr */
	subf	%r4,%r4,%r3	/* subtract to calculate relocbase */
#endif
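	/*
	 * In both cases r3 now holds the run-time address of _DYNAMIC
	 * and r4 the relocation base; elf_reloc_self() uses them to
	 * apply the kernel's own ELF relocations.
	 */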
	bl	CNAME(elf_reloc_self)
	TOC_RESTORE

/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Set up arguments and jump to system initialization code
 */
	mr	%r3, %r30
	mr	%r4, %r31

	/* Prepare core */
	bl	CNAME(booke_init)
	TOC_RESTORE

	/* Switch to thread0.td_kstack now */
	mr	%r1, %r3
	li	%r3, 0
	STORE	%r3, 0(%r1)

	/* Machine independent part, does not return */
	bl	CNAME(mi_startup)
	TOC_RESTORE
	/* NOT REACHED */
5:	b	5b
#ifdef SMP
/************************************************************************/
/* AP Boot page */
/************************************************************************/
	.text
	.globl	__boot_page
	.align	12
__boot_page:
/*
 * The boot page is a special page of memory used during AP bringup.
 * Before the AP comes out of reset, the physical 4K page holding this
 * code is arranged to be mapped at 0xfffff000 by use of
 * platform-dependent registers.
 *
 * Alternatively, this page may be executed using an ePAPR-standardized
 * method -- writing to the address specified in "cpu-release-addr".
 *
 * In either case, execution begins at the last instruction of the
 * page, which is a branch back to the start of the page.
 *
 * The code in the page must do initial MMU setup and normalize the
 * TLBs for regular operation in the correct address space before
 * reading outside the page.
 *
 * This implementation accomplishes this by:
 * 1) Wiping TLB0 and all TLB1 entries but the one currently in use.
 * 2) Establishing a temporary 4K TLB1 mapping in AS=1, and switching
 *    to it with rfi.  This entry must NOT be in TLB1 slot 0.
 *    (This is needed to give the code freedom to clean up AS=0.)
 * 3) Removing the initial TLB1 entry, leaving us with a single valid
 *    TLB1 entry, NOT in slot 0.
 * 4) Installing an AS=0 entry in TLB1 slot 0 mapping the 64MB kernel
 *    segment at its final virtual address.  A second rfi is done to
 *    switch to the final address space.  At this point we can finally
 *    access the rest of the kernel segment safely.
 * 5) The temporary TLB1 AS=1 entry is removed, finally leaving us in
 *    a consistent (but minimal) state.
 * 6) Set up TOC, stack, and pcpu registers.
 * 7) Now that we can finally call C code, call pmap_bootstrap_ap(),
 *    which finishes copying in the shared TLB1 entries.
 *
 * At this point, the MMU is fully set up, and we can proceed with
 * running the actual AP bootstrap code.
 *
 * Pieces of this code are also used for the UP kernel, but in this
 * case the sections specific to boot page functionality are dropped
 * by the preprocessor.
 */
#ifdef __powerpc64__
	nop			/* PPC64 alignment word. 64-bit target. */
#endif
	bl	1f		/* 32-bit target. */

	.globl	bp_trace
bp_trace:
	ADDR(0)			/* Trace pointer (%r31). */

	.globl	bp_kernload
bp_kernload:
	.llong	0		/* Kern phys. load address. */

	.globl	bp_virtaddr
bp_virtaddr:
	ADDR(0)			/* Virt. address of __boot_page. */
/*
 * Initial configuration
 */
1:
	mflr	%r31		/* r31 holds the address of bp_trace */

	/* Set HIDs */
	mfpvr	%r3
	rlwinm	%r3, %r3, 16, 16, 31

	/* HID0 for E500 is default */
	lis	%r4, HID0_E500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500_DEFAULT_SET@l

	cmpli	0, 0, %r3, FSL_E500mc
	bne	2f
	lis	%r4, HID0_E500MC_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E500MC_DEFAULT_SET@l
	b	3f
2:
	cmpli	0, 0, %r3, FSL_E5500
	bne	3f
	lis	%r4, HID0_E5500_DEFAULT_SET@h
	ori	%r4, %r4, HID0_E5500_DEFAULT_SET@l
3:
	mtspr	SPR_HID0, %r4
	isync

	/* Enable branch prediction */
	li	%r3, BUCSR_BPEN
	mtspr	SPR_BUCSR, %r3
	isync
	/* Invalidate all entries in TLB0 */
	li	%r3, 0
	bl	tlb_inval_all

/*
 * Find TLB1 entry which is translating us now
 */
	bl	2f
2:	mflr	%r3
	bl	tlb1_find_current	/* the entry number found is in r29 */

	bl	tlb1_inval_all_but_current

/*
 * Create temporary translation in AS=1 and switch to it
 */
	bl	tlb1_temp_mapping_as1

	mfmsr	%r3
	ori	%r3, %r3, (PSL_IS | PSL_DS)
#ifdef __powerpc64__
	oris	%r3, %r3, PSL_CM@h	/* Ensure we're in 64-bit after RFI */
#endif
	bl	3f
3:	mflr	%r4
	addi	%r4, %r4, (4f - 3b)
	mtspr	SPR_SRR0, %r4
	mtspr	SPR_SRR1, %r3
	rfi				/* Switch context */
/*
 * Invalidate initial entry
 */
4:
	mr	%r3, %r29
	bl	tlb1_inval_entry

/*
 * Setup final mapping in TLB1[0] and switch to it
 */
	/* Final kernel mapping, map in 64 MB of RAM */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	li	%r4, 0			/* Entry 0 */
	rlwimi	%r3, %r4, 16, 4, 15
	mtspr	SPR_MAS0, %r3
	isync

	li	%r3, (TLB_SIZE_64M << MAS1_TSIZE_SHIFT)@l
	oris	%r3, %r3, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r3		/* note TS was not filled, so it's TS=0 */
	isync

	LOAD_ADDR(%r3, VM_MIN_KERNEL_ADDRESS)
	ori	%r3, %r3, (_TLB_ENTRY_SHARED | MAS2_M)@l /* WIMGE = 0b00100 */
	mtspr	SPR_MAS2, %r3
	isync
	/* Retrieve kernel load [physical] address from bp_kernload */
5:
	mflr	%r3
#ifdef __powerpc64__
	clrrdi	%r3, %r3, PAGE_SHIFT	/* trunc_page(%r3) */
#else
	clrrwi	%r3, %r3, PAGE_SHIFT	/* trunc_page(%r3) */
#endif
	/* Load lower half of the kernel loadaddr. */
	lwz	%r4, (bp_kernload - __boot_page + 4)(%r3)
	LOAD	%r5, (bp_virtaddr - __boot_page)(%r3)

	/* Set RPN and protection */
	ori	%r4, %r4, (MAS3_SX | MAS3_SW | MAS3_SR)@l
	mtspr	SPR_MAS3, %r4
	isync
	lwz	%r4, (bp_kernload - __boot_page)(%r3)
	mtspr	SPR_MAS7, %r4
	isync
	tlbwe
	isync
	msync
	/* Switch to the final mapping */
	bl	6f
6:	mflr	%r3
	rlwinm	%r3, %r3, 0, 0xfff	/* Offset from boot page start */
	add	%r3, %r3, %r5		/* Make this a virtual address */
	addi	%r3, %r3, (7f - 6b)	/* And figure out return address. */
#ifdef __powerpc64__
	lis	%r4, PSL_CM@h		/* Note AS=0 */
#else
	li	%r4, 0			/* Note AS=0 */
#endif
	mtspr	SPR_SRR0, %r3
	mtspr	SPR_SRR1, %r4
	rfi
7:
/*
 * At this point we're running at virtual addresses VM_MIN_KERNEL_ADDRESS and
 * beyond, so we may directly access all locations against which the kernel
 * was linked.
 */

/*
 * Invalidate temp mapping
 */
	mr	%r3, %r28
	bl	tlb1_inval_entry
#ifdef __powerpc64__
	/* Set up the TOC pointer */
	b	0f
	.align 3
0:	nop
	bl	1f
	.llong	__tocbase + 0x8000 - .
1:	mflr	%r2
	ld	%r1,0(%r2)
	add	%r2,%r1,%r2
	mtspr	SPR_SPRG8, %r2

	/* Set up the stack pointer */
	addis	%r1,%r2,TOC_REF(tmpstack)@ha
	ld	%r1,TOC_REF(tmpstack)@l(%r1)
	addi	%r1,%r1,TMPSTACKSZ-96
#else
/*
 * Setup a temporary stack
 */
	bl	1f
	.long	tmpstack-.
1:	mflr	%r1
	lwz	%r2,0(%r1)
	add	%r1,%r1,%r2
	stw	%r1, 0(%r1)
	addi	%r1, %r1, (TMPSTACKSZ - 16)
#endif
/*
 * Initialise exception vector offsets
 */
	bl	CNAME(ivor_setup)
	TOC_RESTORE

/*
 * Assign our pcpu instance
 */
	bl	1f
	.long	ap_pcpu-.
1:	mflr	%r4
	lwz	%r3, 0(%r4)
	add	%r3, %r3, %r4
	LOAD	%r3, 0(%r3)
	mtsprg0	%r3

	bl	CNAME(pmap_bootstrap_ap)
	TOC_RESTORE

	bl	CNAME(cpudep_ap_bootstrap)
	TOC_RESTORE
	/* Switch to the idle thread's kstack */
	mr	%r1, %r3

	bl	CNAME(machdep_ap_bootstrap)
	TOC_RESTORE

	/* NOT REACHED */
6:	b	6b
#endif /* SMP */
#if defined (BOOKE_E500)
/*
 * Invalidate all entries in the given TLB.
 *
 * r3	TLBSEL
 */
tlb_inval_all:
	rlwinm	%r3, %r3, 3, (1 << 3)	/* TLBSEL */
	ori	%r3, %r3, (1 << 2)	/* INVALL */
	tlbivax	0, %r3
	isync
	msync

	tlbsync
	msync
	blr
/*
 * expects address to look up in r3, returns entry number in r29
 *
 * FIXME: the hidden assumption is we are now running in AS=0, but we should
 * retrieve actual AS from MSR[IS|DS] and put it in MAS6[SAS]
 */
tlb1_find_current:
	mfspr	%r17, SPR_PID0
	slwi	%r17, %r17, MAS6_SPID0_SHIFT
	mtspr	SPR_MAS6, %r17
	isync
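	/*
	 * tlbsx searches the TLB for the effective address in r3 using
	 * the PID/AS context loaded into MAS6 above; on a hit, MAS0 and
	 * MAS1 describe the matching entry.
	 */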
	tlbsx	0, %r3
	mfspr	%r17, SPR_MAS0
	rlwinm	%r29, %r17, 16, 26, 31		/* MAS0[ESEL] -> r29 */

	/* Make sure we have IPROT set on the entry */
	mfspr	%r17, SPR_MAS1
	oris	%r17, %r17, MAS1_IPROT@h
	mtspr	SPR_MAS1, %r17
	isync
	tlbwe
	isync
	msync
	blr
/*
 * Invalidates a single entry in TLB1.
 *
 * r3		ESEL
 * r4-r5	scratched
 */
tlb1_inval_entry:
	lis	%r4, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r4, %r3, 16, 10, 15	/* Select our entry */
	mtspr	SPR_MAS0, %r4
	isync
	tlbre
	li	%r5, 0			/* MAS1[V] = 0 */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
	blr
/*
 * r29		current entry number
 * r28		returned temp entry
 * r3-r5	scratched
 */
tlb1_temp_mapping_as1:
	/* Read our current translation */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	rlwimi	%r3, %r29, 16, 10, 15	/* Select our current entry */
	mtspr	SPR_MAS0, %r3
	isync
	tlbre

	/*
	 * Prepare and write temp entry
	 *
	 * FIXME this is not robust against overflow i.e. when the current
	 * entry is the last in TLB1
	 */
	lis	%r3, MAS0_TLBSEL1@h	/* Select TLB1 */
	addi	%r28, %r29, 1		/* Use next entry. */
	rlwimi	%r3, %r28, 16, 10, 15	/* Select temp entry */
	mtspr	SPR_MAS0, %r3
	isync
	mfspr	%r5, SPR_MAS1
	li	%r4, 1			/* AS=1 */
	rlwimi	%r5, %r4, 12, 19, 19
	li	%r4, 0			/* Global mapping, TID=0 */
	rlwimi	%r5, %r4, 16, 8, 15
	oris	%r5, %r5, (MAS1_VALID | MAS1_IPROT)@h
	mtspr	SPR_MAS1, %r5
	isync
	mflr	%r3
	li	%r4, 0
	mtspr	SPR_MAS7, %r4
	mtlr	%r3
	isync
	tlbwe
	isync
	msync
	blr
/*
 * Loops over TLB1, invalidates all entries skipping the one which currently
 * maps this code.
 *
 * r29		current entry
 * r3-r5	scratched
 */
tlb1_inval_all_but_current:
	mfspr	%r3, SPR_TLB1CFG	/* Get number of entries */
	andi.	%r3, %r3, TLBCFG_NENTRY_MASK@l
	li	%r4, 0			/* Start from Entry 0 */
1:	lis	%r5, MAS0_TLBSEL1@h
	rlwimi	%r5, %r4, 16, 10, 15
	mtspr	SPR_MAS0, %r5
	isync
	tlbre
	mfspr	%r5, SPR_MAS1
	cmpw	%r4, %r29		/* our current entry? */
	beq	2f
	rlwinm	%r5, %r5, 0, 2, 31	/* clear VALID and IPROT bits */
	mtspr	SPR_MAS1, %r5
	isync
	tlbwe
	isync
	msync
2:	addi	%r4, %r4, 1
	cmpw	%r4, %r3		/* Check if this is the last entry */
	bne	1b
	blr
#endif
#ifdef SMP
	.globl	__boot_tlb1
/*
 * The __boot_tlb1 table is used to hold BSP TLB1 entries
 * marked with the _TLB_ENTRY_SHARED flag during AP bootstrap.
 * The BSP fills in the table in tlb_ap_prep(); the AP then loads
 * its contents into the TLB1 hardware in pmap_bootstrap_ap().
 */
__boot_tlb1:
	.space	TLB1_MAX_ENTRIES * TLB_ENTRY_SIZE
__boot_page_padding:
/*
 * The boot page must be exactly 4K, with the last word of the page
 * acting as the reset vector, so we must pad out the remainder.
 * Upon release from holdoff, the CPU fetches the last word of the
 * boot page.
 */
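	/*
	 * 4092 = 4096 - 4: the pad leaves exactly one word free, so the
	 * branch below occupies the last word of the 4K page.
	 */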
	.space	4092 - (__boot_page_padding - __boot_page)
	b	__boot_page
/*
 * This is the end of the boot page.
 * During AP startup, the previous instruction is at 0xfffffffc
 * virtual (i.e., the reset vector).
 */
#endif /* SMP */
/************************************************************************/
/* locore subroutines */
/************************************************************************/

/*
 * Cache disable/enable/inval sequences according
 * to section 2.16 of E500CORE RM.
 */
ENTRY(dcache_inval)
	/* Invalidate d-cache */
	mfspr	%r3, SPR_L1CSR0
	ori	%r3, %r3, (L1CSR0_DCFI | L1CSR0_DCLFR)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
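	/*
	 * Spin until hardware clears the flash-invalidate bit,
	 * indicating the invalidation has completed.
	 */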
1:	mfspr	%r3, SPR_L1CSR0
	andi.	%r3, %r3, L1CSR0_DCFI
	bne	1b

	blr
END(dcache_inval)
ENTRY(dcache_disable)
	/* Disable d-cache */
	mfspr	%r3, SPR_L1CSR0
	li	%r4, L1CSR0_DCE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr
END(dcache_disable)

ENTRY(dcache_enable)
	/* Enable d-cache */
	mfspr	%r3, SPR_L1CSR0
	oris	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@h
	ori	%r3, %r3, (L1CSR0_DCPE | L1CSR0_DCE)@l
	msync
	isync
	mtspr	SPR_L1CSR0, %r3
	isync
	blr
END(dcache_enable)

ENTRY(icache_inval)
	/* Invalidate i-cache */
	mfspr	%r3, SPR_L1CSR1
	ori	%r3, %r3, (L1CSR1_ICFI | L1CSR1_ICLFR)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
1:	mfspr	%r3, SPR_L1CSR1
	andi.	%r3, %r3, L1CSR1_ICFI
	bne	1b
	blr
END(icache_inval)

ENTRY(icache_disable)
	/* Disable i-cache */
	mfspr	%r3, SPR_L1CSR1
	li	%r4, L1CSR1_ICE@l
	not	%r4, %r4
	and	%r3, %r3, %r4
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr
END(icache_disable)

ENTRY(icache_enable)
	/* Enable i-cache */
	mfspr	%r3, SPR_L1CSR1
	oris	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@h
	ori	%r3, %r3, (L1CSR1_ICPE | L1CSR1_ICE)@l
	isync
	mtspr	SPR_L1CSR1, %r3
	isync
	blr
END(icache_enable)

/*
 * L2 cache disable/enable/inval sequences for E500mc.
 */
ENTRY(l2cache_inval)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@h
	ori	%r3, %r3, (L2CSR0_L2FI | L2CSR0_L2LFC)@l
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
1:	mfspr	%r3, SPR_L2CSR0
	andis.	%r3, %r3, L2CSR0_L2FI@h
	bne	1b
	blr
END(l2cache_inval)

ENTRY(l2cache_enable)
	mfspr	%r3, SPR_L2CSR0
	oris	%r3, %r3, (L2CSR0_L2E | L2CSR0_L2PE)@h
	isync
	mtspr	SPR_L2CSR0, %r3
	isync
	blr
END(l2cache_enable)

/*
 * Branch predictor setup.
 */
ENTRY(bpred_enable)
	mfspr	%r3, SPR_BUCSR
	ori	%r3, %r3, BUCSR_BBFI
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	ori	%r3, %r3, BUCSR_BPEN
	isync
	mtspr	SPR_BUCSR, %r3
	isync
	blr
END(bpred_enable)

/*
 * XXX: This should be moved to a shared AIM/booke asm file, if one ever is
 * created.
 */
ENTRY(get_spr)
	/* Note: The spr number is patched at runtime */
	mfspr	%r3, 0
	blr
END(get_spr)
/************************************************************************/
/* Data section */
/************************************************************************/
	.data
	.align 3
GLOBAL(__startkernel)
	ADDR(begin)
GLOBAL(__endkernel)
	ADDR(end)
	.align	4
tmpstack:
	.space	TMPSTACKSZ
tmpstackbound:
	.space	10240	/* XXX: this really should not be necessary */
#ifdef __powerpc64__
TOC_ENTRY(tmpstack)
#ifdef SMP
TOC_ENTRY(bp_kernload)
#endif
#endif

/*
 * Compiled KERNBASE locations
 */
	.globl	kernbase
	.set	kernbase, KERNBASE

#include <powerpc/booke/trap_subr.S>