pmu.c

/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC instruction.
 *      For instance the AMD RDPMC instruction uses 0000_0003h in ECX to access
 *      C001_0007h (MSR_K7_PERFCTR3). Intel has a similar mechanism, except
 *      that it also supports fixed counters. idx can be used as an index into
 *      the gp and fixed counter arrays.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping between
 *      pmc and perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
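
/*
 * Illustration of index type 2 vs. type 3 above (an assumed example, not
 * taken from this file): on Intel, a guest RDPMC of fixed counter 1 uses
 * ECX = 4000_0001h (bit 30 selecting the fixed-counter space), while inside
 * pmu.c that same counter is tracked under the global PMC index
 * INTEL_PMC_IDX_FIXED + 1.
 */

/* irq_work callback: deliver the pending PMI once we are out of NMI context. */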
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
        struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        kvm_pmu_deliver_pmi(vcpu);
}
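
/*
 * Overflow callback for counters programmed without PMI delivery: record the
 * overflow in global_status and flag the counter in reprogram_pmi so that
 * kvm_pmu_handle_event() reprograms it on the next KVM_REQ_PMU.
 */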
static void kvm_perf_overflow(struct perf_event *perf_event,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (!test_and_set_bit(pmc->idx,
                              (unsigned long *)&pmu->reprogram_pmi)) {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
        }
}
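
/*
 * Overflow callback for counters programmed with PMI delivery: as above, but
 * additionally inject a PMI into the guest, either via KVM_REQ_PMI when the
 * NMI interrupted guest mode, or through the irq_work above otherwise (see
 * the comment in the body).
 */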
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
                                   struct perf_sample_data *data,
                                   struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (!test_and_set_bit(pmc->idx,
                              (unsigned long *)&pmu->reprogram_pmi)) {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

                /*
                 * Inject PMI. If the vcpu was in guest mode during the NMI,
                 * the PMI can be injected on guest mode re-entry. Otherwise
                 * we can't be sure that the vcpu wasn't executing a hlt
                 * instruction at the time of the vmexit and is not going to
                 * re-enter guest mode until woken up. So we should wake it,
                 * but this is impossible from NMI context. Do it from irq
                 * work instead.
                 */
                if (!kvm_is_in_guest())
                        irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
                else
                        kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
        }
}
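
/*
 * Back the guest counter with a host perf_event. The sample period is set to
 * the number of events left until the guest counter wraps,
 * (-counter) & bitmask, so that a host overflow corresponds to a guest
 * counter overflow.
 */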
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                  unsigned config, bool exclude_user,
                                  bool exclude_kernel, bool intr,
                                  bool in_tx, bool in_tx_cp)
{
        struct perf_event *event;
        struct perf_event_attr attr = {
                .type = type,
                .size = sizeof(attr),
                .pinned = true,
                .exclude_idle = true,
                .exclude_host = 1,
                .exclude_user = exclude_user,
                .exclude_kernel = exclude_kernel,
                .config = config,
        };

        attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

        if (in_tx)
                attr.config |= HSW_IN_TX;
        if (in_tx_cp) {
                /*
                 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
                 * period. Just clear the sample period so at least
                 * allocating the counter doesn't fail.
                 */
                attr.sample_period = 0;
                attr.config |= HSW_IN_TX_CHECKPOINTED;
        }

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 intr ? kvm_perf_overflow_intr :
                                                 kvm_perf_overflow, pmc);
        if (IS_ERR(event)) {
                pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
                                     PTR_ERR(event), pmc->idx);
                return;
        }

        pmc->perf_event = event;
        clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}
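
/*
 * Program a general-purpose counter from the guest-written EVENTSEL value.
 * If no edge/invert/cmask/TSX modifiers are set and the vendor code can map
 * event_select + unit_mask to a generic perf hardware event, that event is
 * used; otherwise the counter is programmed as a raw event.
 */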
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
        unsigned config, type = PERF_TYPE_RAW;
        u8 event_select, unit_mask;

        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
                printk_once("kvm pmu: pin control bit is ignored\n");

        pmc->eventsel = eventsel;

        pmc_stop_counter(pmc);

        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
                return;

        event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

        if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
                          ARCH_PERFMON_EVENTSEL_INV |
                          ARCH_PERFMON_EVENTSEL_CMASK |
                          HSW_IN_TX |
                          HSW_IN_TX_CHECKPOINTED))) {
                config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
                                                               event_select,
                                                               unit_mask);
                if (config != PERF_COUNT_HW_MAX)
                        type = PERF_TYPE_HARDWARE;
        }

        if (type == PERF_TYPE_RAW)
                config = eventsel & X86_RAW_EVENT_MASK;

        pmc_reprogram_counter(pmc, type, config,
                              !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                              !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                              eventsel & ARCH_PERFMON_EVENTSEL_INT,
                              (eventsel & HSW_IN_TX),
                              (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);
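
/*
 * Program a fixed counter from its 4-bit field of IA32_FIXED_CTR_CTRL:
 * bit 0 enables counting in ring 0, bit 1 in ring 3, and bit 3 requests a
 * PMI on overflow.
 */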
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
        unsigned en_field = ctrl & 0x3;
        bool pmi = ctrl & 0x8;

        pmc_stop_counter(pmc);

        if (!en_field || !pmc_is_enabled(pmc))
                return;

        pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
                              kvm_x86_ops->pmu_ops->find_fixed_event(idx),
                              !(en_field & 0x2), /* exclude user */
                              !(en_field & 0x1), /* exclude kernel */
                              pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);
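
/* Reprogram a counter identified by its global PMC index (gp or fixed). */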
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
        struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

        if (!pmc)
                return;

        if (pmc_is_gp(pmc))
                reprogram_gp_counter(pmc, pmc->eventsel);
        else {
                int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
                u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

                reprogram_fixed_counter(pmc, ctrl, idx);
        }
}
EXPORT_SYMBOL_GPL(reprogram_counter);
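
/*
 * Called when KVM_REQ_PMU is pending: reprogram every counter flagged in
 * reprogram_pmi by the overflow callbacks above; bits for counters with no
 * backing perf_event are simply cleared.
 */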
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        u64 bitmask;
        int bit;

        bitmask = pmu->reprogram_pmi;

        for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
                struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

                if (unlikely(!pmc || !pmc->perf_event)) {
                        clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
                        continue;
                }

                reprogram_counter(pmu, bit);
        }
}

/* check if idx is a valid index to access PMU */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
        return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
}
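
/*
 * VMware-style guests read a small set of pseudo-PMCs through RDPMC that
 * return host TSC and boot-based time rather than real counter values; these
 * indices bypass the normal PMU code.
 */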
bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
        switch (pmc_idx) {
        case VMWARE_BACKDOOR_PMC_HOST_TSC:
        case VMWARE_BACKDOOR_PMC_REAL_TIME:
        case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
                return true;
        }
        return false;
}

static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        u64 ctr_val;

        switch (idx) {
        case VMWARE_BACKDOOR_PMC_HOST_TSC:
                ctr_val = rdtsc();
                break;
        case VMWARE_BACKDOOR_PMC_REAL_TIME:
                ctr_val = ktime_get_boot_ns();
                break;
        case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
                ctr_val = ktime_get_boot_ns() +
                        vcpu->kvm->arch.kvmclock_offset;
                break;
        default:
                return 1;
        }

        *data = ctr_val;
        return 0;
}
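
/*
 * Emulate RDPMC: bit 31 of the guest's index selects "fast" mode, which
 * truncates the result to the low 32 bits; vendor code translates the
 * remaining index bits into a pmc.
 */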
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        bool fast_mode = idx & (1u << 31);
        struct kvm_pmc *pmc;
        u64 mask = fast_mode ? ~0u : ~0ull;

        if (is_vmware_backdoor_pmc(idx))
                return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

        pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
        if (!pmc)
                return 1;

        *data = pmc_read_counter(pmc) & mask;
        return 0;
}
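
/*
 * Deliver the PMI through the local APIC's performance-counter LVT entry
 * when the APIC is emulated in the kernel.
 */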
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
        if (lapic_in_kernel(vcpu))
                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
}

/* Refresh PMU settings. This function is generally called when the
 * underlying settings change (such as the guest updating its PMU CPUID),
 * which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        irq_work_sync(&pmu->irq_work);
        kvm_x86_ops->pmu_ops->reset(vcpu);
}
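
/*
 * Called at vcpu creation: zero the PMU state, let the vendor implementation
 * set up its counters, wire up the irq_work used for PMI delivery from NMI
 * context, and do an initial refresh from the guest's CPUID.
 */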
void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        memset(pmu, 0, sizeof(*pmu));
        kvm_x86_ops->pmu_ops->init(vcpu);
        init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
        kvm_pmu_refresh(vcpu);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_pmu_reset(vcpu);
}