vmlinux.lds.S

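/*
 * PROVIDE32() emits the traditional etext/edata/end symbols only on 32-bit
 * builds; on 64-bit the names are rewritten to __unused__* so the symbols
 * are not defined.
 */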
#ifdef CONFIG_PPC64
#define PROVIDE32(x)	PROVIDE(__unused__##x)
#else
#define PROVIDE32(x)	PROVIDE(x)
#endif
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>
#include <asm/cache.h>
#include <asm/thread_info.h>

ENTRY(_stext)

PHDRS {
	kernel PT_LOAD FLAGS(7); /* RWX */
	notes PT_NOTE FLAGS(0);
	dummy PT_NOTE FLAGS(0);

	/* binutils < 2.18 has a bug that makes it misbehave when taking an
	   ELF file with all segments at load address 0 as input. This
	   happens when running "strip" on vmlinux, because of the AT() magic
	   in this linker script. People using GCC >= 4.2 won't run into
	   this problem, because the "build-id" support will put some data
	   into the "notes" segment (at a non-zero load address).

	   To work around this, we force some data into both the "dummy"
	   segment and the kernel segment, so the dummy segment will get a
	   non-zero load address. It's not enough to always create the
	   "notes" segment, since if nothing gets assigned to it, its load
	   address will be zero. */
}

#ifdef CONFIG_PPC64
OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
#else
OUTPUT_ARCH(powerpc:common)
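/*
 * 32-bit powerpc is big-endian, so the least-significant word of the
 * 64-bit jiffies_64 counter lives at offset 4.
 */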
jiffies = jiffies_64 + 4;
#endif
SECTIONS
{
	. = KERNELBASE;

	/*
	 * Text, read only data and other permanent read-only sections
	 */

	_text = .;
	_stext = .;

	/*
	 * Head text.
	 * This needs to be in its own output section to avoid ld placing
	 * branch trampoline stubs randomly throughout the fixed sections,
	 * which it will do (even if the branch comes from another section)
	 * in order to optimize stub generation.
	 */
	.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) {
#ifdef CONFIG_PPC64
		KEEP(*(.head.text.first_256B));
#ifdef CONFIG_PPC_BOOK3E
# define END_FIXED	0x100
#else
		KEEP(*(.head.text.real_vectors));
		*(.head.text.real_trampolines);
		KEEP(*(.head.text.virt_vectors));
		*(.head.text.virt_trampolines);
# if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
		KEEP(*(.head.data.fwnmi_page));
# define END_FIXED	0x8000
# else
# define END_FIXED	0x7000
# endif
#endif
		ASSERT((. == END_FIXED), "vmlinux.lds.S: fixed section overflow error");
#else /* !CONFIG_PPC64 */
		HEAD_TEXT
#endif
	} :kernel

	/*
	 * If the build dies here, it's likely code in head_64.S is referencing
	 * labels it can't reach, and the linker inserting stubs without the
	 * assembler's knowledge. To debug, remove the above assert and
	 * rebuild. Look for branch stubs in the fixed section region.
	 *
	 * Linker stub generation could be allowed in "trampoline"
	 * sections if absolutely necessary, but this would require
	 * some rework of the fixed sections. Before resorting to this,
	 * consider references that have sufficient addressing range,
	 * (e.g., hand coded trampolines) so the linker does not have
	 * to add stubs.
	 *
	 * Linker stubs at the top of the main text section are currently not
	 * detected, and will result in a crash at boot due to offsets being
	 * wrong.
	 */
#ifdef CONFIG_PPC64
	/*
	 * BLOCK(0) overrides the default output section alignment because
	 * this needs to start right after .head.text in order for fixed
	 * section placement to work.
	 */
	.text BLOCK(0) : AT(ADDR(.text) - LOAD_OFFSET) {
#else
	.text : AT(ADDR(.text) - LOAD_OFFSET) {
		ALIGN_FUNCTION();
#endif
		/* careful! __ftr_alt_* sections need to be close to .text */
		*(.text .fixup __ftr_alt_* .ref.text)
		SCHED_TEXT
		CPUIDLE_TEXT
		LOCK_TEXT
		KPROBES_TEXT
		IRQENTRY_TEXT
		SOFTIRQENTRY_TEXT
		MEM_KEEP(init.text)
		MEM_KEEP(exit.text)

#ifdef CONFIG_PPC32
		*(.got1)
		__got2_start = .;
		*(.got2)
		__got2_end = .;
#endif /* CONFIG_PPC32 */
	} :kernel

	. = ALIGN(PAGE_SIZE);
	_etext = .;
	PROVIDE32 (etext = .);

	/* Read-only data */
	RODATA

#ifdef CONFIG_PPC64
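	/*
	 * The three tables below record the locations where the store
	 * forwarding barrier (kernel entry/exit) and RFI flush mitigations
	 * are patched in. They are processed at boot, which either nops the
	 * sites out or installs the required barrier/flush sequence,
	 * depending on what the platform reports is needed.
	 */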
	. = ALIGN(8);
	__stf_entry_barrier_fixup : AT(ADDR(__stf_entry_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_entry_barrier_fixup = .;
		*(__stf_entry_barrier_fixup)
		__stop___stf_entry_barrier_fixup = .;
	}

	. = ALIGN(8);
	__stf_exit_barrier_fixup : AT(ADDR(__stf_exit_barrier_fixup) - LOAD_OFFSET) {
		__start___stf_exit_barrier_fixup = .;
		*(__stf_exit_barrier_fixup)
		__stop___stf_exit_barrier_fixup = .;
	}

	. = ALIGN(8);
	__rfi_flush_fixup : AT(ADDR(__rfi_flush_fixup) - LOAD_OFFSET) {
		__start___rfi_flush_fixup = .;
		*(__rfi_flush_fixup)
		__stop___rfi_flush_fixup = .;
	}
#endif
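	/* The exception table: maps faulting instruction addresses (e.g. in
	 * the user access helpers) to their fixup handlers.
	 */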
	EXCEPTION_TABLE(0)

	NOTES :kernel :notes

	/* The dummy segment contents for the bug workaround mentioned above
	   near PHDRS. */
	.dummy : AT(ADDR(.dummy) - LOAD_OFFSET) {
		LONG(0)
		LONG(0)
		LONG(0)
	} :kernel :dummy

	/*
	 * Init sections discarded at runtime
	 */
	. = ALIGN(PAGE_SIZE);
	__init_begin = .;
	INIT_TEXT_SECTION(PAGE_SIZE) :kernel

	/* .exit.text is discarded at runtime, not link time,
	 * to deal with references from __bug_table
	 */
	.exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
		EXIT_TEXT
	}

	.init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
		INIT_DATA
		__vtop_table_begin = .;
		*(.vtop_fixup);
		__vtop_table_end = .;
		__ptov_table_begin = .;
		*(.ptov_fixup);
		__ptov_table_end = .;
	}

	.init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
		INIT_SETUP(16)
	}

	.initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET) {
		INIT_CALLS
	}

	.con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
		CON_INITCALL
	}

	SECURITY_INIT
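	/*
	 * CPU, MMU, lwsync and (on 64-bit) firmware feature fixup tables.
	 * Each entry describes an alternative code sequence; early boot code
	 * walks these tables and patches the kernel text to match the
	 * detected CPU, MMU and firmware features.
	 */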
	. = ALIGN(8);
	__ftr_fixup : AT(ADDR(__ftr_fixup) - LOAD_OFFSET) {
		__start___ftr_fixup = .;
		*(__ftr_fixup)
		__stop___ftr_fixup = .;
	}

	. = ALIGN(8);
	__mmu_ftr_fixup : AT(ADDR(__mmu_ftr_fixup) - LOAD_OFFSET) {
		__start___mmu_ftr_fixup = .;
		*(__mmu_ftr_fixup)
		__stop___mmu_ftr_fixup = .;
	}

	. = ALIGN(8);
	__lwsync_fixup : AT(ADDR(__lwsync_fixup) - LOAD_OFFSET) {
		__start___lwsync_fixup = .;
		*(__lwsync_fixup)
		__stop___lwsync_fixup = .;
	}

#ifdef CONFIG_PPC64
	. = ALIGN(8);
	__fw_ftr_fixup : AT(ADDR(__fw_ftr_fixup) - LOAD_OFFSET) {
		__start___fw_ftr_fixup = .;
		*(__fw_ftr_fixup)
		__stop___fw_ftr_fixup = .;
	}
#endif

	.init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
		INIT_RAM_FS
	}

	PERCPU_SECTION(L1_CACHE_BYTES)
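	/*
	 * The machine description table. Every platform registered with
	 * define_machine() contributes its machdep entry here, and the boot
	 * code scans __machine_desc_start..__machine_desc_end to find the
	 * matching platform.
	 */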
	. = ALIGN(8);
	.machine.desc : AT(ADDR(.machine.desc) - LOAD_OFFSET) {
		__machine_desc_start = . ;
		*(.machine.desc)
		__machine_desc_end = . ;
	}

#ifdef CONFIG_RELOCATABLE
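	/*
	 * For CONFIG_RELOCATABLE the dynamic sections are kept in the image
	 * so the early relocation code can walk .rela.dyn and apply the
	 * relative relocations when the kernel is running at an address
	 * other than the one it was linked at.
	 */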
	. = ALIGN(8);
	.dynsym : AT(ADDR(.dynsym) - LOAD_OFFSET)
	{
#ifdef CONFIG_PPC32
		__dynamic_symtab = .;
#endif
		*(.dynsym)
	}
	.dynstr : AT(ADDR(.dynstr) - LOAD_OFFSET) { *(.dynstr) }
	.dynamic : AT(ADDR(.dynamic) - LOAD_OFFSET)
	{
		__dynamic_start = .;
		*(.dynamic)
	}
	.hash : AT(ADDR(.hash) - LOAD_OFFSET) { *(.hash) }
	.interp : AT(ADDR(.interp) - LOAD_OFFSET) { *(.interp) }
	.rela.dyn : AT(ADDR(.rela.dyn) - LOAD_OFFSET)
	{
		__rela_dyn_start = .;
		*(.rela*)
	}
#endif

	/* .exit.data is discarded at runtime, not link time,
	 * to deal with references from .exit.text
	 */
	.exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
		EXIT_DATA
	}

	/* freed after init ends here */
	. = ALIGN(PAGE_SIZE);
	__init_end = .;

	/*
	 * And now the various read/write data
	 */

	. = ALIGN(PAGE_SIZE);
	_sdata = .;

#ifdef CONFIG_PPC32
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.sdata)
		*(.got.plt) *(.got)
	}
#else
	.data : AT(ADDR(.data) - LOAD_OFFSET) {
		DATA_DATA
		*(.data.rel*)
		*(.toc1)
		*(.branch_lt)
	}
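	/*
	 * Official procedure descriptors, used by the ELFv1 ABI: each entry
	 * pairs a function's entry point with its TOC pointer value.
	 */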
	.opd : AT(ADDR(.opd) - LOAD_OFFSET) {
		*(.opd)
	}

	. = ALIGN(256);
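	/*
	 * The kernel TOC/GOT. 64-bit code reaches it through r2, which is
	 * conventionally biased 0x8000 past __toc_start so that 16-bit
	 * signed offsets cover the whole region. In the non-relocatable
	 * case prom_init's TOC entries are grouped separately, presumably
	 * so they can be handled before the kernel has been moved to its
	 * final address.
	 */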
	.got : AT(ADDR(.got) - LOAD_OFFSET) {
		__toc_start = .;
#ifndef CONFIG_RELOCATABLE
		__prom_init_toc_start = .;
		arch/powerpc/kernel/prom_init.o*(.toc .got)
		__prom_init_toc_end = .;
#endif
		*(.got)
		*(.toc)
	}
#endif

	/* The initial task and kernel stack */
	INIT_TASK_DATA_SECTION(THREAD_SIZE)

	.data..page_aligned : AT(ADDR(.data..page_aligned) - LOAD_OFFSET) {
		PAGE_ALIGNED_DATA(PAGE_SIZE)
	}

	.data..cacheline_aligned : AT(ADDR(.data..cacheline_aligned) - LOAD_OFFSET) {
		CACHELINE_ALIGNED_DATA(L1_CACHE_BYTES)
	}

	.data..read_mostly : AT(ADDR(.data..read_mostly) - LOAD_OFFSET) {
		READ_MOSTLY_DATA(L1_CACHE_BYTES)
	}

	. = ALIGN(PAGE_SIZE);
	.data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
		NOSAVE_DATA
	}

	. = ALIGN(PAGE_SIZE);
	_edata = .;
	PROVIDE32 (edata = .);

	/*
	 * And finally the bss
	 */
	BSS_SECTION(0, 0, 0)

	. = ALIGN(PAGE_SIZE);
	_end = . ;
	PROVIDE32 (end = .);

	/* Sections to be discarded. */
	DISCARDS
}