pmu_amd.c

/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Implementation is based on the pmu_intel.c file.
 */
#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

enum pmu_type {
        PMU_TYPE_COUNTER = 0,
        PMU_TYPE_EVNTSEL,
};

enum index {
        INDEX_ZERO = 0,
        INDEX_ONE,
        INDEX_TWO,
        INDEX_THREE,
        INDEX_FOUR,
        INDEX_FIVE,
        INDEX_ERROR,
};

/* duplicated from amd_perfmon_event_map; K7 and above should work. */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
        [0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
        [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
        [2] = { 0x7d, 0x07, PERF_COUNT_HW_CACHE_REFERENCES },
        [3] = { 0x7e, 0x07, PERF_COUNT_HW_CACHE_MISSES },
        [4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
        [5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
        [6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
        [7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};

static unsigned int get_msr_base(struct kvm_pmu *pmu, enum pmu_type type)
{
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                if (type == PMU_TYPE_COUNTER)
                        return MSR_F15H_PERF_CTR;
                else
                        return MSR_F15H_PERF_CTL;
        } else {
                if (type == PMU_TYPE_COUNTER)
                        return MSR_K7_PERFCTR0;
                else
                        return MSR_K7_EVNTSEL0;
        }
}
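
/*
 * For orientation, a sketch of the two MSR layouts selected above
 * (addresses taken from the architectural MSR definitions):
 *
 *   legacy (K7):          MSR_K7_EVNTSEL0..3 at 0xc0010000..0xc0010003,
 *                         MSR_K7_PERFCTR0..3 at 0xc0010004..0xc0010007
 *   PERFCTR_CORE (F15h+): MSR_F15H_PERF_CTLn at 0xc0010200 + 2 * n,
 *                         MSR_F15H_PERF_CTRn at 0xc0010201 + 2 * n
 *
 * The legacy scheme groups all event selects ahead of all counters, while
 * the extended scheme interleaves CTL/CTR pairs.
 */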

static enum index msr_to_index(u32 msr)
{
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTR0:
        case MSR_K7_EVNTSEL0:
        case MSR_K7_PERFCTR0:
                return INDEX_ZERO;
        case MSR_F15H_PERF_CTL1:
        case MSR_F15H_PERF_CTR1:
        case MSR_K7_EVNTSEL1:
        case MSR_K7_PERFCTR1:
                return INDEX_ONE;
        case MSR_F15H_PERF_CTL2:
        case MSR_F15H_PERF_CTR2:
        case MSR_K7_EVNTSEL2:
        case MSR_K7_PERFCTR2:
                return INDEX_TWO;
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTR3:
        case MSR_K7_EVNTSEL3:
        case MSR_K7_PERFCTR3:
                return INDEX_THREE;
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTR4:
                return INDEX_FOUR;
        case MSR_F15H_PERF_CTL5:
        case MSR_F15H_PERF_CTR5:
                return INDEX_FIVE;
        default:
                return INDEX_ERROR;
        }
}

static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                                             enum pmu_type type)
{
        switch (msr) {
        case MSR_F15H_PERF_CTL0:
        case MSR_F15H_PERF_CTL1:
        case MSR_F15H_PERF_CTL2:
        case MSR_F15H_PERF_CTL3:
        case MSR_F15H_PERF_CTL4:
        case MSR_F15H_PERF_CTL5:
        case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
                if (type != PMU_TYPE_EVNTSEL)
                        return NULL;
                break;
        case MSR_F15H_PERF_CTR0:
        case MSR_F15H_PERF_CTR1:
        case MSR_F15H_PERF_CTR2:
        case MSR_F15H_PERF_CTR3:
        case MSR_F15H_PERF_CTR4:
        case MSR_F15H_PERF_CTR5:
        case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
                if (type != PMU_TYPE_COUNTER)
                        return NULL;
                break;
        default:
                return NULL;
        }

        return &pmu->gp_counters[msr_to_index(msr)];
}
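
/*
 * Illustrative example: get_gp_pmc_amd(pmu, MSR_F15H_PERF_CTR2,
 * PMU_TYPE_COUNTER) yields &pmu->gp_counters[2], whereas looking up the
 * same MSR with PMU_TYPE_EVNTSEL returns NULL, because the type check
 * rejects a counter MSR used as an event select.
 */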

static unsigned amd_find_arch_event(struct kvm_pmu *pmu,
                                    u8 event_select,
                                    u8 unit_mask)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++)
                if (amd_event_mapping[i].eventsel == event_select
                    && amd_event_mapping[i].unit_mask == unit_mask)
                        break;

        if (i == ARRAY_SIZE(amd_event_mapping))
                return PERF_COUNT_HW_MAX;

        return amd_event_mapping[i].event_type;
}
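
/*
 * For example, event select 0x76 with unit mask 0x00 maps to
 * PERF_COUNT_HW_CPU_CYCLES via amd_event_mapping[]; any pair not in the
 * table falls through to PERF_COUNT_HW_MAX (no matching generic event).
 */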

/* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */
static unsigned amd_find_fixed_event(int idx)
{
        return PERF_COUNT_HW_MAX;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 * Because AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled
 * (return true).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
        return true;
}

static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
        unsigned int base = get_msr_base(pmu, PMU_TYPE_COUNTER);
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE)) {
                /*
                 * The idx is contiguous. The MSRs are not. The counter
                 * MSRs are interleaved with the event select MSRs.
                 */
                pmc_idx *= 2;
        }

        return get_gp_pmc_amd(pmu, base + pmc_idx, PMU_TYPE_COUNTER);
}
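
/*
 * Worked example: with PERFCTR_CORE, base is MSR_F15H_PERF_CTR
 * (0xc0010201), so pmc_idx == 2 is doubled to 4 and resolves to
 * 0xc0010205 == MSR_F15H_PERF_CTR2, stepping over the interleaved
 * event select MSRs.
 */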

/* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */
static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        idx &= ~(3u << 30);

        return (idx >= pmu->nr_arch_gp_counters);
}

/* idx is the ECX register of the RDPMC instruction */
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx,
                                          u64 *mask)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *counters;

        idx &= ~(3u << 30);
        if (idx >= pmu->nr_arch_gp_counters)
                return NULL;
        counters = pmu->gp_counters;

        return &counters[idx];
}
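
/*
 * Note on the "idx &= ~(3u << 30)" masking in the two helpers above: the
 * top two bits of the RDPMC index are used by Intel as counter-type flags
 * (bit 30 selects the fixed-counter range), so they are stripped here
 * rather than being interpreted as part of the counter number.
 */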

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        return get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER) ||
                get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
}

static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                *data = pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                *data = pmc->eventsel;
                return 0;
        }

        return 1;
}

static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        struct kvm_pmc *pmc;
        u32 msr = msr_info->index;
        u64 data = msr_info->data;

        /* MSR_PERFCTRn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
        if (pmc) {
                pmc->counter += data - pmc_read_counter(pmc);
                return 0;
        }
        /* MSR_EVNTSELn */
        pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
        if (pmc) {
                if (data == pmc->eventsel)
                        return 0;
                if (!(data & pmu->reserved_bits)) {
                        reprogram_gp_counter(pmc, data);
                        return 0;
                }
        }

        return 1;
}
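
/*
 * Note the counter write above: instead of stopping the backing perf
 * event, the delta between the requested value and the current reading is
 * folded into pmc->counter, so subsequent pmc_read_counter() calls observe
 * the guest-written value on top of the still-running event.
 */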

static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
        else
                pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

        pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
        pmu->reserved_bits = 0xffffffff00200000ull;
        /* not applicable to AMD; but clear them to prevent any fallout */
        pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
        pmu->nr_arch_fixed_counters = 0;
        pmu->version = 0;
        pmu->global_status = 0;
}
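
/*
 * The reserved-bits mask above treats bit 21 and bits 63:32 of the event
 * select as reserved. Note that PERFCTR_CORE parts architecturally define
 * bits 35:32 as the high event-select bits, so this mask is conservative
 * and rejects events that would need them.
 */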

static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        BUILD_BUG_ON(AMD64_NUM_COUNTERS_CORE > INTEL_PMC_MAX_GENERIC);

        for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
                pmu->gp_counters[i].type = KVM_PMC_GP;
                pmu->gp_counters[i].vcpu = vcpu;
                pmu->gp_counters[i].idx = i;
        }
}

static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        int i;

        for (i = 0; i < AMD64_NUM_COUNTERS_CORE; i++) {
                struct kvm_pmc *pmc = &pmu->gp_counters[i];

                pmc_stop_counter(pmc);
                pmc->counter = pmc->eventsel = 0;
        }
}

struct kvm_pmu_ops amd_pmu_ops = {
        .find_arch_event = amd_find_arch_event,
        .find_fixed_event = amd_find_fixed_event,
        .pmc_is_enabled = amd_pmc_is_enabled,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
        .msr_idx_to_pmc = amd_msr_idx_to_pmc,
        .is_valid_msr_idx = amd_is_valid_msr_idx,
        .is_valid_msr = amd_is_valid_msr,
        .get_msr = amd_pmu_get_msr,
        .set_msr = amd_pmu_set_msr,
        .refresh = amd_pmu_refresh,
        .init = amd_pmu_init,
        .reset = amd_pmu_reset,
};
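
/*
 * amd_pmu_ops is hooked up from the SVM side (kvm_x86_ops->pmu_ops points
 * here when KVM runs on AMD hardware), mirroring intel_pmu_ops on VMX.
 */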