setup.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/cpu.h>
#include <linux/bootmem.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/proc-fns.h>
#include <asm/cache_info.h>
#include <asm/elf.h>
#include <nds32_intrinsic.h>

#define HWCAP_MFUSR_PC 0x000001
#define HWCAP_EXT 0x000002
#define HWCAP_EXT2 0x000004
#define HWCAP_FPU 0x000008
#define HWCAP_AUDIO 0x000010
#define HWCAP_BASE16 0x000020
#define HWCAP_STRING 0x000040
#define HWCAP_REDUCED_REGS 0x000080
#define HWCAP_VIDEO 0x000100
#define HWCAP_ENCRYPT 0x000200
#define HWCAP_EDM 0x000400
#define HWCAP_LMDMA 0x000800
#define HWCAP_PFM 0x001000
#define HWCAP_HSMP 0x002000
#define HWCAP_TRACE 0x004000
#define HWCAP_DIV 0x008000
#define HWCAP_MAC 0x010000
#define HWCAP_L2C 0x020000
#define HWCAP_FPU_DP 0x040000
#define HWCAP_V2 0x080000
#define HWCAP_DX_REGS 0x100000

unsigned long cpu_id, cpu_rev, cpu_cfgid;
char cpu_series;
char *endianness = NULL;

unsigned int __atags_pointer __initdata;

unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

/*
 * The following string table must stay in sync with the HWCAP_xx bitmask
 * defined in <asm/procinfo.h>.
 */
static const char *hwcap_str[] = {
	"mfusr_pc",
	"perf1",
	"perf2",
	"fpu",
	"audio",
	"16b",
	"string",
	"reduced_regs",
	"video",
	"encrypt",
	"edm",
	"lmdma",
	"pfm",
	"hsmp",
	"trace",
	"div",
	"mac",
	"l2c",
	"fpu_dp",
	"v2",
	"dx_regs",
	NULL,
};

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define WRITE_METHOD "write through"
#else
#define WRITE_METHOD "write back"
#endif

struct cache_info L1_cache_info[2];
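
/*
 * Print the detected CPU features and record the L1 cache geometry
 * (ways, sets, line size) in L1_cache_info for later use.
 */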
static void __init dump_cpu_info(int cpu)
{
	int i, p = 0;
	char str[sizeof(hwcap_str) + 16];

	for (i = 0; hwcap_str[i]; i++) {
		if (elf_hwcap & (1 << i)) {
			sprintf(str + p, "%s ", hwcap_str[i]);
			p += strlen(hwcap_str[i]) + 1;
		}
	}
	pr_info("CPU%d Features: %s\n", cpu, str);
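
	/* Probe the L1 instruction and data cache geometry and report it */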
	L1_cache_info[ICACHE].ways = CACHE_WAY(ICACHE);
	L1_cache_info[ICACHE].line_size = CACHE_LINE_SIZE(ICACHE);
	L1_cache_info[ICACHE].sets = CACHE_SET(ICACHE);
	L1_cache_info[ICACHE].size =
	    L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].line_size *
	    L1_cache_info[ICACHE].sets / 1024;
	pr_info("L1I:%dKB/%dS/%dW/%dB\n", L1_cache_info[ICACHE].size,
		L1_cache_info[ICACHE].sets, L1_cache_info[ICACHE].ways,
		L1_cache_info[ICACHE].line_size);

	L1_cache_info[DCACHE].ways = CACHE_WAY(DCACHE);
	L1_cache_info[DCACHE].line_size = CACHE_LINE_SIZE(DCACHE);
	L1_cache_info[DCACHE].sets = CACHE_SET(DCACHE);
	L1_cache_info[DCACHE].size =
	    L1_cache_info[DCACHE].ways * L1_cache_info[DCACHE].line_size *
	    L1_cache_info[DCACHE].sets / 1024;
	pr_info("L1D:%dKB/%dS/%dW/%dB\n", L1_cache_info[DCACHE].size,
		L1_cache_info[DCACHE].sets, L1_cache_info[DCACHE].ways,
		L1_cache_info[DCACHE].line_size);
	pr_info("L1 D-Cache is %s\n", WRITE_METHOD);
	if (L1_cache_info[DCACHE].line_size != L1_CACHE_BYTES)
		pr_crit("The cache line size(%d) of this processor is not the same as L1_CACHE_BYTES(%d).\n",
			L1_cache_info[DCACHE].line_size, L1_CACHE_BYTES);
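
	/*
	 * When cache aliasing is possible, record how many page-sized
	 * slices share a cache index (aliasing_num) and the address mask
	 * used to tell potential aliases apart.
	 */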
#ifdef CONFIG_CPU_CACHE_ALIASING
	{
		int aliasing_num;

		aliasing_num =
		    L1_cache_info[ICACHE].size * 1024 / PAGE_SIZE /
		    L1_cache_info[ICACHE].ways;
		L1_cache_info[ICACHE].aliasing_num = aliasing_num;
		L1_cache_info[ICACHE].aliasing_mask =
		    (aliasing_num - 1) << PAGE_SHIFT;

		aliasing_num =
		    L1_cache_info[DCACHE].size * 1024 / PAGE_SIZE /
		    L1_cache_info[DCACHE].ways;
		L1_cache_info[DCACHE].aliasing_num = aliasing_num;
		L1_cache_info[DCACHE].aliasing_mask =
		    (aliasing_num - 1) << PAGE_SHIFT;
	}
#endif
}
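
/*
 * Identify the CPU from its version/configuration registers, build up
 * elf_hwcap accordingly and make sure the L1 caches are enabled.
 */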
static void __init setup_cpuinfo(void)
{
	unsigned long tmp = 0, cpu_name;

	cpu_dcache_inval_all();
	cpu_icache_inval_all();
	__nds32__isb();

	cpu_id = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCPUID) >> CPU_VER_offCPUID;
	cpu_name = ((cpu_id) & 0xf0) >> 4;
	cpu_series = cpu_name ? cpu_name - 10 + 'A' : 'N';
	cpu_id = cpu_id & 0xf;
	cpu_rev = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskREV) >> CPU_VER_offREV;
	cpu_cfgid = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCFGID) >> CPU_VER_offCFGID;

	pr_info("CPU:%c%ld, CPU_VER 0x%08x(id %lu, rev %lu, cfg %lu)\n",
		cpu_series, cpu_id, __nds32__mfsr(NDS32_SR_CPU_VER), cpu_id, cpu_rev, cpu_cfgid);

	elf_hwcap |= HWCAP_MFUSR_PC;
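
	/*
	 * MSC_CFG.BASEV == 0 means the original baseline ISA, where DIV and
	 * MAC support must be probed individually; a non-zero BASEV means a
	 * V2 (or later) baseline, which always provides both.
	 */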
	if (((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskBASEV) >> MSC_CFG_offBASEV) == 0) {
		if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskDIV)
			elf_hwcap |= HWCAP_DIV;

		if ((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskMAC)
		    || (cpu_id == 12 && cpu_rev < 4))
			elf_hwcap |= HWCAP_MAC;
	} else {
		elf_hwcap |= HWCAP_V2;
		elf_hwcap |= HWCAP_DIV;
		elf_hwcap |= HWCAP_MAC;
	}

	if (cpu_cfgid & 0x0001)
		elf_hwcap |= HWCAP_EXT;

	if (cpu_cfgid & 0x0002)
		elf_hwcap |= HWCAP_BASE16;

	if (cpu_cfgid & 0x0004)
		elf_hwcap |= HWCAP_EXT2;

	if (cpu_cfgid & 0x0008)
		elf_hwcap |= HWCAP_FPU;

	if (cpu_cfgid & 0x0010)
		elf_hwcap |= HWCAP_STRING;

	if (__nds32__mfsr(NDS32_SR_MMU_CFG) & MMU_CFG_mskDE)
		endianness = "MSB";
	else
		endianness = "LSB";

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskEDM)
		elf_hwcap |= HWCAP_EDM;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskLMDMA)
		elf_hwcap |= HWCAP_LMDMA;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskPFM)
		elf_hwcap |= HWCAP_PFM;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskHSMP)
		elf_hwcap |= HWCAP_HSMP;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskTRACE)
		elf_hwcap |= HWCAP_TRACE;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskAUDIO)
		elf_hwcap |= HWCAP_AUDIO;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C)
		elf_hwcap |= HWCAP_L2C;
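
	/* Enable the L1 caches unless they are disabled in Kconfig */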
	tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
	if (!IS_ENABLED(CONFIG_CPU_DCACHE_DISABLE))
		tmp |= CACHE_CTL_mskDC_EN;

	if (!IS_ENABLED(CONFIG_CPU_ICACHE_DISABLE))
		tmp |= CACHE_CTL_mskIC_EN;

	__nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);

	dump_cpu_info(smp_processor_id());
}
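
/*
 * Discover the available RAM from memblock, establish the low/high
 * memory split and reserve the ranges that must not be handed out
 * (kernel image, FDT, reserved-memory regions).
 */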
static void __init setup_memory(void)
{
	unsigned long ram_start_pfn;
	unsigned long free_ram_start_pfn;
	phys_addr_t memory_start, memory_end;
	struct memblock_region *region;

	memory_end = memory_start = 0;

	/* Find the main memory region that holds the kernel */
	for_each_memblock(memory, region) {
		memory_start = region->base;
		memory_end = region->base + region->size;
		pr_info("%s: Memory: 0x%x-0x%x\n", __func__,
			memory_start, memory_end);
	}

	if (!memory_end)
		panic("No memory!");

	ram_start_pfn = PFN_UP(memblock_start_of_DRAM());
	/* free_ram_start_pfn is the first page after the kernel image */
	free_ram_start_pfn = PFN_UP(__pa(&_end));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());

	/* this may clamp max_pfn when RAM exceeds MAXMEM and highmem is off */
	if (max_pfn - ram_start_pfn <= MAXMEM_PFN)
		max_low_pfn = max_pfn;
	else {
		max_low_pfn = MAXMEM_PFN + ram_start_pfn;
		if (!IS_ENABLED(CONFIG_HIGHMEM))
			max_pfn = MAXMEM_PFN + ram_start_pfn;
	}

	/* high_memory marks the top of the direct mapping; vmalloc space sits above it */
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
	min_low_pfn = free_ram_start_pfn;

	/*
	 * Initialize the boot-time allocator (with low memory only).
	 *
	 * This makes the memory from the end of the kernel to the end of
	 * RAM usable.
	 */
	memblock_set_bottom_up(true);
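	/* Reserve everything from the start of RAM up to the end of the kernel image */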
	memblock_reserve(PFN_PHYS(ram_start_pfn), PFN_PHYS(free_ram_start_pfn - ram_start_pfn));

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	memblock_dump_all();
}
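
/*
 * Top-level architecture setup: pick up the device tree, identify the
 * CPU, set up the memory layout and MMU, and hand the kernel command
 * line back to the generic init code.
 */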
void __init setup_arch(char **cmdline_p)
{
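	/* Use the DTB handed over by the bootloader if present, otherwise the built-in one */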
	early_init_devtree(__atags_pointer ?
			   phys_to_virt(__atags_pointer) : __dtb_start);

	setup_cpuinfo();

	init_mm.start_code = (unsigned long)&_stext;
	init_mm.end_code = (unsigned long)&_etext;
	init_mm.end_data = (unsigned long)&_edata;
	init_mm.brk = (unsigned long)&_end;

	/* set up the boot memory allocator */
	setup_memory();

	/* paging_init() sets up the MMU and marks all pages as reserved */
	paging_init();

	/* invalidate all TLB entries now that the new mapping has been created */
	__nds32__tlbop_flua();

	/* parse early parameters the generic way */
	parse_early_param();

	unflatten_and_copy_device_tree();

	if (IS_ENABLED(CONFIG_VT)) {
		if (IS_ENABLED(CONFIG_DUMMY_CONSOLE))
			conswitchp = &dummy_con;
	}

	*cmdline_p = boot_command_line;
	early_trap_init();
}
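
/*
 * /proc/cpuinfo support: report the CPU identity, L1 cache geometry,
 * BogoMIPS and the feature string as a single record.
 */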
static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %c%ld (id %lu, rev %lu, cfg %lu)\n",
		   cpu_series, cpu_id, cpu_id, cpu_rev, cpu_cfgid);

	seq_printf(m, "L1I\t\t: %luKB/%luS/%luW/%luB\n",
		   CACHE_SET(ICACHE) * CACHE_WAY(ICACHE) *
		   CACHE_LINE_SIZE(ICACHE) / 1024, CACHE_SET(ICACHE),
		   CACHE_WAY(ICACHE), CACHE_LINE_SIZE(ICACHE));

	seq_printf(m, "L1D\t\t: %luKB/%luS/%luW/%luB\n",
		   CACHE_SET(DCACHE) * CACHE_WAY(DCACHE) *
		   CACHE_LINE_SIZE(DCACHE) / 1024, CACHE_SET(DCACHE),
		   CACHE_WAY(DCACHE), CACHE_LINE_SIZE(DCACHE));

	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100);

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_puts(m, "\n\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};