cpuid.h

/* SPDX-License-Identifier: GPL-2.0 */
#ifndef ARCH_X86_KVM_CPUID_H
#define ARCH_X86_KVM_CPUID_H

#include "x86.h"
#include <asm/cpu.h>
#include <asm/processor.h>

int kvm_update_cpuid(struct kvm_vcpu *vcpu);
bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
                                              u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
                            struct kvm_cpuid_entry2 __user *entries,
                            unsigned int type);
int kvm_vcpu_ioctl_set_cpuid(struct kvm_vcpu *vcpu,
                             struct kvm_cpuid *cpuid,
                             struct kvm_cpuid_entry __user *entries);
int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
                              struct kvm_cpuid2 *cpuid,
                              struct kvm_cpuid_entry2 __user *entries);
bool kvm_cpuid(struct kvm_vcpu *vcpu, u32 *eax, u32 *ebx,
               u32 *ecx, u32 *edx, bool check_limit);

int cpuid_query_maxphyaddr(struct kvm_vcpu *vcpu);

static inline int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.maxphyaddr;
}

struct cpuid_reg {
        u32 function;
        u32 index;
        int reg;
};
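
/*
 * Map each kernel feature word (the word index encoded in X86_FEATURE_*
 * bit numbers) back to the CPUID function, index and output register
 * that reports those feature bits.
 */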
static const struct cpuid_reg reverse_cpuid[] = {
        [CPUID_1_EDX]         = {         1, 0, CPUID_EDX},
        [CPUID_8000_0001_EDX] = {0x80000001, 0, CPUID_EDX},
        [CPUID_8086_0001_EDX] = {0x80860001, 0, CPUID_EDX},
        [CPUID_1_ECX]         = {         1, 0, CPUID_ECX},
        [CPUID_C000_0001_EDX] = {0xc0000001, 0, CPUID_EDX},
        [CPUID_8000_0001_ECX] = {0x80000001, 0, CPUID_ECX},
        [CPUID_7_0_EBX]       = {         7, 0, CPUID_EBX},
        [CPUID_D_1_EAX]       = {       0xd, 1, CPUID_EAX},
        [CPUID_8000_0008_EBX] = {0x80000008, 0, CPUID_EBX},
        [CPUID_6_EAX]         = {         6, 0, CPUID_EAX},
        [CPUID_8000_000A_EDX] = {0x8000000a, 0, CPUID_EDX},
        [CPUID_7_ECX]         = {         7, 0, CPUID_ECX},
        [CPUID_8000_0007_EBX] = {0x80000007, 0, CPUID_EBX},
        [CPUID_7_EDX]         = {         7, 0, CPUID_EDX},
};
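
/*
 * Translate an X86_FEATURE_* bit number into the CPUID leaf that defines
 * it.  The BUILD_BUG_ON()s fire at compile time if the feature's word is
 * outside reverse_cpuid[] or has no mapping (function == 0).
 */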
static __always_inline struct cpuid_reg x86_feature_cpuid(unsigned x86_feature)
{
        unsigned x86_leaf = x86_feature / 32;

        BUILD_BUG_ON(x86_leaf >= ARRAY_SIZE(reverse_cpuid));
        BUILD_BUG_ON(reverse_cpuid[x86_leaf].function == 0);

        return reverse_cpuid[x86_leaf];
}
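
/*
 * Return a pointer to the register within the vCPU's CPUID entry that
 * holds @x86_feature, or NULL if the guest's CPUID lacks that leaf.
 */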
static __always_inline int *guest_cpuid_get_register(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
        struct kvm_cpuid_entry2 *entry;
        const struct cpuid_reg cpuid = x86_feature_cpuid(x86_feature);

        entry = kvm_find_cpuid_entry(vcpu, cpuid.function, cpuid.index);
        if (!entry)
                return NULL;

        switch (cpuid.reg) {
        case CPUID_EAX:
                return &entry->eax;
        case CPUID_EBX:
                return &entry->ebx;
        case CPUID_ECX:
                return &entry->ecx;
        case CPUID_EDX:
                return &entry->edx;
        default:
                BUILD_BUG();
                return NULL;
        }
}
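
/*
 * Check whether the guest's CPUID advertises @x86_feature.  XSAVE is
 * special-cased: it is only reported as present if the host CPU supports
 * XSAVE as well.
 */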
static __always_inline bool guest_cpuid_has(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
        int *reg;

        if (x86_feature == X86_FEATURE_XSAVE &&
            !static_cpu_has(X86_FEATURE_XSAVE))
                return false;

        reg = guest_cpuid_get_register(vcpu, x86_feature);
        if (!reg)
                return false;

        return *reg & bit(x86_feature);
}

static __always_inline void guest_cpuid_clear(struct kvm_vcpu *vcpu, unsigned x86_feature)
{
        int *reg;

        reg = guest_cpuid_get_register(vcpu, x86_feature);
        if (reg)
                *reg &= ~bit(x86_feature);
}
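
/* The guest is AMD if CPUID leaf 0 EBX carries the "AuthenticAMD" signature. */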
static inline bool guest_cpuid_is_amd(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0, 0);
        return best && best->ebx == X86EMUL_CPUID_VENDOR_AuthenticAMD_ebx;
}
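
/*
 * Decode the guest's family/model/stepping from leaf 1 EAX; each helper
 * returns -1 if the guest's CPUID has no leaf 1.
 */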
static inline int guest_cpuid_family(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_family(best->eax);
}

static inline int guest_cpuid_model(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_model(best->eax);
}

static inline int guest_cpuid_stepping(struct kvm_vcpu *vcpu)
{
        struct kvm_cpuid_entry2 *best;

        best = kvm_find_cpuid_entry(vcpu, 0x1, 0);
        if (!best)
                return -1;

        return x86_stepping(best->eax);
}
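
/*
 * CPUID faulting: supports_cpuid_fault() reports whether the vCPU's
 * MSR_PLATFORM_INFO advertises the capability, and cpuid_fault_enabled()
 * whether the guest has enabled it via MSR_MISC_FEATURES_ENABLES.
 */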
static inline bool supports_cpuid_fault(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.msr_platform_info & MSR_PLATFORM_INFO_CPUID_FAULT;
}

static inline bool cpuid_fault_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.msr_misc_features_enables &
                  MSR_MISC_FEATURES_ENABLES_CPUID_FAULT;
}

#endif