/* vmlinux.lds.S — Blackfin (elf32-bfin) kernel linker script */
  1. /*
  2. * Copyright 2004-2009 Analog Devices Inc.
  3. *
  4. * Licensed under the GPL-2 or later
  5. */
  6. #include <asm-generic/vmlinux.lds.h>
  7. #include <asm/mem_map.h>
  8. #include <asm/page.h>
  9. #include <asm/thread_info.h>
  10. OUTPUT_FORMAT("elf32-bfin")
  11. ENTRY(__start)
  12. _jiffies = _jiffies_64;
  13. SECTIONS
  14. {
  15. #ifdef CONFIG_RAMKERNEL
  16. . = CONFIG_BOOT_LOAD;
  17. #else
  18. . = CONFIG_ROM_BASE;
  19. #endif
  20. /* Neither the text, ro_data or bss section need to be aligned
  21. * So pack them back to back
  22. */
  23. .text :
  24. {
  25. __text = .;
  26. _text = .;
  27. __stext = .;
  28. TEXT_TEXT
  29. #ifndef CONFIG_SCHEDULE_L1
  30. SCHED_TEXT
  31. #endif
  32. CPUIDLE_TEXT
  33. LOCK_TEXT
  34. IRQENTRY_TEXT
  35. SOFTIRQENTRY_TEXT
  36. KPROBES_TEXT
  37. #ifdef CONFIG_ROMKERNEL
  38. __sinittext = .;
  39. INIT_TEXT
  40. __einittext = .;
  41. EXIT_TEXT
  42. #endif
  43. *(.text.*)
  44. *(.fixup)
  45. #if !L1_CODE_LENGTH
  46. *(.l1.text)
  47. #endif
  48. __etext = .;
  49. }
  50. EXCEPTION_TABLE(4)
  51. NOTES
  52. /* Just in case the first read only is a 32-bit access */
  53. RO_DATA(4)
  54. __rodata_end = .;
  55. #ifdef CONFIG_ROMKERNEL
  56. . = CONFIG_BOOT_LOAD;
  57. .bss : AT(__rodata_end)
  58. #else
  59. .bss :
  60. #endif
  61. {
  62. . = ALIGN(4);
  63. ___bss_start = .;
  64. *(.bss .bss.*)
  65. *(COMMON)
  66. #if !L1_DATA_A_LENGTH
  67. *(.l1.bss)
  68. #endif
  69. #if !L1_DATA_B_LENGTH
  70. *(.l1.bss.B)
  71. #endif
  72. . = ALIGN(4);
  73. ___bss_stop = .;
  74. }
  75. #if defined(CONFIG_ROMKERNEL)
  76. .data : AT(LOADADDR(.bss) + SIZEOF(.bss))
  77. #else
  78. .data :
  79. #endif
  80. {
  81. __sdata = .;
  82. /* This gets done first, so the glob doesn't suck it in */
  83. CACHELINE_ALIGNED_DATA(32)
  84. #if !L1_DATA_A_LENGTH
  85. . = ALIGN(32);
  86. *(.data_l1.cacheline_aligned)
  87. *(.l1.data)
  88. #endif
  89. #if !L1_DATA_B_LENGTH
  90. *(.l1.data.B)
  91. #endif
  92. #if !L2_LENGTH
  93. . = ALIGN(32);
  94. *(.data_l2.cacheline_aligned)
  95. *(.l2.data)
  96. #endif
  97. DATA_DATA
  98. CONSTRUCTORS
  99. INIT_TASK_DATA(THREAD_SIZE)
  100. __edata = .;
  101. }
  102. __data_lma = LOADADDR(.data);
  103. __data_len = SIZEOF(.data);
  104. /* The init section should be last, so when we free it, it goes into
  105. * the general memory pool, and (hopefully) will decrease fragmentation
  106. * a tiny bit. The init section has a _requirement_ that it be
  107. * PAGE_SIZE aligned
  108. */
  109. . = ALIGN(PAGE_SIZE);
  110. ___init_begin = .;
  111. #ifdef CONFIG_RAMKERNEL
  112. INIT_TEXT_SECTION(PAGE_SIZE)
  113. /* We have to discard exit text and such at runtime, not link time, to
  114. * handle embedded cross-section references (alt instructions, bug
  115. * table, eh_frame, etc...). We need all of our .text up front and
  116. * .data after it for PCREL call issues.
  117. */
  118. .exit.text :
  119. {
  120. EXIT_TEXT
  121. }
  122. . = ALIGN(16);
  123. INIT_DATA_SECTION(16)
  124. PERCPU_SECTION(32)
  125. .exit.data :
  126. {
  127. EXIT_DATA
  128. }
  129. .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
  130. #else
  131. .init.data : AT(__data_lma + __data_len + 32)
  132. {
  133. __sinitdata = .;
  134. INIT_DATA
  135. INIT_SETUP(16)
  136. INIT_CALLS
  137. CON_INITCALL
  138. SECURITY_INITCALL
  139. INIT_RAM_FS
  140. . = ALIGN(PAGE_SIZE);
  141. ___per_cpu_load = .;
  142. PERCPU_INPUT(32)
  143. EXIT_DATA
  144. __einitdata = .;
  145. }
  146. __init_data_lma = LOADADDR(.init.data);
  147. __init_data_len = SIZEOF(.init.data);
  148. __init_data_end = .;
  149. .text_l1 L1_CODE_START : AT(__init_data_lma + __init_data_len)
  150. #endif
  151. {
  152. . = ALIGN(4);
  153. __stext_l1 = .;
  154. *(.l1.text.head)
  155. *(.l1.text)
  156. #ifdef CONFIG_SCHEDULE_L1
  157. SCHED_TEXT
  158. #endif
  159. . = ALIGN(4);
  160. __etext_l1 = .;
  161. }
  162. __text_l1_lma = LOADADDR(.text_l1);
  163. __text_l1_len = SIZEOF(.text_l1);
  164. ASSERT (__text_l1_len <= L1_CODE_LENGTH, "L1 text overflow!")
  165. .data_l1 L1_DATA_A_START : AT(__text_l1_lma + __text_l1_len)
  166. {
  167. . = ALIGN(4);
  168. __sdata_l1 = .;
  169. *(.l1.data)
  170. __edata_l1 = .;
  171. . = ALIGN(32);
  172. *(.data_l1.cacheline_aligned)
  173. . = ALIGN(4);
  174. __sbss_l1 = .;
  175. *(.l1.bss)
  176. . = ALIGN(4);
  177. __ebss_l1 = .;
  178. }
  179. __data_l1_lma = LOADADDR(.data_l1);
  180. __data_l1_len = SIZEOF(.data_l1);
  181. ASSERT (__data_l1_len <= L1_DATA_A_LENGTH, "L1 data A overflow!")
  182. .data_b_l1 L1_DATA_B_START : AT(__data_l1_lma + __data_l1_len)
  183. {
  184. . = ALIGN(4);
  185. __sdata_b_l1 = .;
  186. *(.l1.data.B)
  187. __edata_b_l1 = .;
  188. . = ALIGN(4);
  189. __sbss_b_l1 = .;
  190. *(.l1.bss.B)
  191. . = ALIGN(4);
  192. __ebss_b_l1 = .;
  193. }
  194. __data_b_l1_lma = LOADADDR(.data_b_l1);
  195. __data_b_l1_len = SIZEOF(.data_b_l1);
  196. ASSERT (__data_b_l1_len <= L1_DATA_B_LENGTH, "L1 data B overflow!")
  197. .text_data_l2 L2_START : AT(__data_b_l1_lma + __data_b_l1_len)
  198. {
  199. . = ALIGN(4);
  200. __stext_l2 = .;
  201. *(.l2.text)
  202. . = ALIGN(4);
  203. __etext_l2 = .;
  204. . = ALIGN(4);
  205. __sdata_l2 = .;
  206. *(.l2.data)
  207. __edata_l2 = .;
  208. . = ALIGN(32);
  209. *(.data_l2.cacheline_aligned)
  210. . = ALIGN(4);
  211. __sbss_l2 = .;
  212. *(.l2.bss)
  213. . = ALIGN(4);
  214. __ebss_l2 = .;
  215. }
  216. __l2_lma = LOADADDR(.text_data_l2);
  217. __l2_len = SIZEOF(.text_data_l2);
  218. ASSERT (__l2_len <= L2_LENGTH, "L2 overflow!")
  219. /* Force trailing alignment of our init section so that when we
  220. * free our init memory, we don't leave behind a partial page.
  221. */
  222. #ifdef CONFIG_RAMKERNEL
  223. . = __l2_lma + __l2_len;
  224. #else
  225. . = __init_data_end;
  226. #endif
  227. . = ALIGN(PAGE_SIZE);
  228. ___init_end = .;
  229. __end =.;
  230. STABS_DEBUG
  231. DWARF_DEBUG
  232. DISCARDS
  233. }