cpuinfo.c

/*
 * Record and handle CPU attributes.
 *
 * Copyright (C) 2014 ARM Ltd.
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#include <asm/arch_timer.h>
#include <asm/cachetype.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/cpufeature.h>

#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/preempt.h>
#include <linux/printk.h>
#include <linux/smp.h>

/*
 * In case the boot CPU is hotpluggable, we record its initial state and
 * current state separately. Certain system registers may contain different
 * values depending on configuration at or after reset.
 */
DEFINE_PER_CPU(struct cpuinfo_arm64, cpu_data);
static struct cpuinfo_arm64 boot_cpu_data;
static bool mixed_endian_el0 = true;

static char *icache_policy_str[] = {
	[ICACHE_POLICY_RESERVED] = "RESERVED/UNKNOWN",
	[ICACHE_POLICY_AIVIVT] = "AIVIVT",
	[ICACHE_POLICY_VIPT] = "VIPT",
	[ICACHE_POLICY_PIPT] = "PIPT",
};

unsigned long __icache_flags;

static void cpuinfo_detect_icache_policy(struct cpuinfo_arm64 *info)
{
	unsigned int cpu = smp_processor_id();
	u32 l1ip = CTR_L1IP(info->reg_ctr);

	if (l1ip != ICACHE_POLICY_PIPT) {
		/*
		 * VIPT caches are non-aliasing if the VA always equals the PA
		 * in all bit positions that are covered by the index. This is
		 * the case if the size of a way (# of sets * line size) does
		 * not exceed PAGE_SIZE.
		 */
		u32 waysize = icache_get_numsets() * icache_get_linesize();

		if (l1ip != ICACHE_POLICY_VIPT || waysize > PAGE_SIZE)
			set_bit(ICACHEF_ALIASING, &__icache_flags);
	}
	if (l1ip == ICACHE_POLICY_AIVIVT)
		set_bit(ICACHEF_AIVIVT, &__icache_flags);

	pr_info("Detected %s I-cache on CPU%d\n", icache_policy_str[l1ip], cpu);
}
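/*
 * EL0 mixed-endian support: the per-CPU check reads ID_AA64MMFR0_EL1
 * directly; the system-wide view is the conjunction across all CPUs
 * recorded so far (see update_mixed_endian_el0_support()).
 */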
bool cpu_supports_mixed_endian_el0(void)
{
	return id_aa64mmfr0_mixed_endian_el0(read_cpuid(ID_AA64MMFR0_EL1));
}

bool system_supports_mixed_endian_el0(void)
{
	return mixed_endian_el0;
}

static void update_mixed_endian_el0_support(struct cpuinfo_arm64 *info)
{
	mixed_endian_el0 &= id_aa64mmfr0_mixed_endian_el0(info->reg_id_aa64mmfr0);
}

static void update_cpu_features(struct cpuinfo_arm64 *info)
{
	update_mixed_endian_el0_support(info);
}
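/*
 * Compare the masked boot-CPU and current-CPU values of a register
 * (the CHECK/CHECK_MASK macros below select the field). Returns 0 if
 * they match, otherwise warns and returns 1.
 */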
static int check_reg_mask(char *name, u64 mask, u64 boot, u64 cur, int cpu)
{
	if ((boot & mask) == (cur & mask))
		return 0;

	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016lx, CPU%d: %#016lx\n",
		name, (unsigned long)boot, cpu, (unsigned long)cur);

	return 1;
}

#define CHECK_MASK(field, mask, boot, cur, cpu) \
	check_reg_mask(#field, mask, (boot)->reg_ ## field, (cur)->reg_ ## field, cpu)

#define CHECK(field, boot, cur, cpu) \
	CHECK_MASK(field, ~0ULL, boot, cur, cpu)

/*
 * Verify that CPUs don't have unexpected differences that will cause problems.
 */
static void cpuinfo_sanity_check(struct cpuinfo_arm64 *cur)
{
	unsigned int cpu = smp_processor_id();
	struct cpuinfo_arm64 *boot = &boot_cpu_data;
	unsigned int diff = 0;

	/*
	 * The kernel can handle differing I-cache policies, but otherwise
	 * caches should look identical. Userspace JITs will make use of
	 * *minLine.
	 */
	diff |= CHECK_MASK(ctr, 0xffff3fff, boot, cur, cpu);

	/*
	 * Userspace may perform DC ZVA instructions. Mismatched block sizes
	 * could result in too much or too little memory being zeroed if a
	 * process is preempted and migrated between CPUs.
	 */
	diff |= CHECK(dczid, boot, cur, cpu);

	/* If different, timekeeping will be broken (especially with KVM) */
	diff |= CHECK(cntfrq, boot, cur, cpu);

	/*
	 * The kernel uses self-hosted debug features and expects CPUs to
	 * support identical debug features. We presently need CTX_CMPs, WRPs,
	 * and BRPs to be identical.
	 * ID_AA64DFR1 is currently RES0.
	 */
	diff |= CHECK(id_aa64dfr0, boot, cur, cpu);
	diff |= CHECK(id_aa64dfr1, boot, cur, cpu);

	/*
	 * Even in big.LITTLE, processors should be identical instruction-set
	 * wise.
	 */
	diff |= CHECK(id_aa64isar0, boot, cur, cpu);
	diff |= CHECK(id_aa64isar1, boot, cur, cpu);

	/*
	 * Differing PARange support is fine as long as all peripherals and
	 * memory are mapped within the minimum PARange of all CPUs.
	 * Linux should not care about secure memory.
	 * ID_AA64MMFR1 is currently RES0.
	 */
	diff |= CHECK_MASK(id_aa64mmfr0, 0xffffffffffff0ff0, boot, cur, cpu);
	diff |= CHECK(id_aa64mmfr1, boot, cur, cpu);

	/*
	 * EL3 is not our concern.
	 * ID_AA64PFR1 is currently RES0.
	 */
	diff |= CHECK_MASK(id_aa64pfr0, 0xffffffffffff0fff, boot, cur, cpu);
	diff |= CHECK(id_aa64pfr1, boot, cur, cpu);

	/*
	 * If we have AArch32, we care about 32-bit features for compat. These
	 * registers should be RES0 otherwise.
	 */
	diff |= CHECK(id_dfr0, boot, cur, cpu);
	diff |= CHECK(id_isar0, boot, cur, cpu);
	diff |= CHECK(id_isar1, boot, cur, cpu);
	diff |= CHECK(id_isar2, boot, cur, cpu);
	diff |= CHECK(id_isar3, boot, cur, cpu);
	diff |= CHECK(id_isar4, boot, cur, cpu);
	diff |= CHECK(id_isar5, boot, cur, cpu);

	/*
	 * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
	 * ACTLR formats could differ across CPUs and therefore would have to
	 * be trapped for virtualization anyway.
	 */
	diff |= CHECK_MASK(id_mmfr0, 0xff0fffff, boot, cur, cpu);
	diff |= CHECK(id_mmfr1, boot, cur, cpu);
	diff |= CHECK(id_mmfr2, boot, cur, cpu);
	diff |= CHECK(id_mmfr3, boot, cur, cpu);
	diff |= CHECK(id_pfr0, boot, cur, cpu);
	diff |= CHECK(id_pfr1, boot, cur, cpu);

	diff |= CHECK(mvfr0, boot, cur, cpu);
	diff |= CHECK(mvfr1, boot, cur, cpu);
	diff |= CHECK(mvfr2, boot, cur, cpu);

	/*
	 * Mismatched CPU features are a recipe for disaster. Don't even
	 * pretend to support them.
	 */
	WARN_TAINT_ONCE(diff, TAINT_CPU_OUT_OF_SPEC,
			"Unsupported CPU feature variation.\n");
}
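/*
 * Read this CPU's ID and feature registers into *info, then derive the
 * I-cache policy, run the local errata/feature checks, and fold the
 * result into the system-wide feature state.
 */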
static void __cpuinfo_store_cpu(struct cpuinfo_arm64 *info)
{
	info->reg_cntfrq = arch_timer_get_cntfrq();
	info->reg_ctr = read_cpuid_cachetype();
	info->reg_dczid = read_cpuid(DCZID_EL0);
	info->reg_midr = read_cpuid_id();

	info->reg_id_aa64dfr0 = read_cpuid(ID_AA64DFR0_EL1);
	info->reg_id_aa64dfr1 = read_cpuid(ID_AA64DFR1_EL1);
	info->reg_id_aa64isar0 = read_cpuid(ID_AA64ISAR0_EL1);
	info->reg_id_aa64isar1 = read_cpuid(ID_AA64ISAR1_EL1);
	info->reg_id_aa64mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
	info->reg_id_aa64mmfr1 = read_cpuid(ID_AA64MMFR1_EL1);
	info->reg_id_aa64pfr0 = read_cpuid(ID_AA64PFR0_EL1);
	info->reg_id_aa64pfr1 = read_cpuid(ID_AA64PFR1_EL1);

	info->reg_id_dfr0 = read_cpuid(ID_DFR0_EL1);
	info->reg_id_isar0 = read_cpuid(ID_ISAR0_EL1);
	info->reg_id_isar1 = read_cpuid(ID_ISAR1_EL1);
	info->reg_id_isar2 = read_cpuid(ID_ISAR2_EL1);
	info->reg_id_isar3 = read_cpuid(ID_ISAR3_EL1);
	info->reg_id_isar4 = read_cpuid(ID_ISAR4_EL1);
	info->reg_id_isar5 = read_cpuid(ID_ISAR5_EL1);
	info->reg_id_mmfr0 = read_cpuid(ID_MMFR0_EL1);
	info->reg_id_mmfr1 = read_cpuid(ID_MMFR1_EL1);
	info->reg_id_mmfr2 = read_cpuid(ID_MMFR2_EL1);
	info->reg_id_mmfr3 = read_cpuid(ID_MMFR3_EL1);
	info->reg_id_pfr0 = read_cpuid(ID_PFR0_EL1);
	info->reg_id_pfr1 = read_cpuid(ID_PFR1_EL1);

	info->reg_mvfr0 = read_cpuid(MVFR0_EL1);
	info->reg_mvfr1 = read_cpuid(MVFR1_EL1);
	info->reg_mvfr2 = read_cpuid(MVFR2_EL1);

	cpuinfo_detect_icache_policy(info);

	check_local_cpu_errata();
	check_local_cpu_features();
	update_cpu_features(info);
}
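/*
 * Record the calling CPU's state in its per-CPU cpu_data slot and check
 * it for unexpected differences against the boot CPU.
 */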
void cpuinfo_store_cpu(void)
{
	struct cpuinfo_arm64 *info = this_cpu_ptr(&cpu_data);
	__cpuinfo_store_cpu(info);
	cpuinfo_sanity_check(info);
}
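/*
 * Record the boot CPU's state and keep a separate copy in boot_cpu_data
 * so that secondary CPUs can later be compared against it.
 */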
void __init cpuinfo_store_boot_cpu(void)
{
	struct cpuinfo_arm64 *info = &per_cpu(cpu_data, 0);
	__cpuinfo_store_cpu(info);

	boot_cpu_data = *info;
}