arm_pmu.h 4.1 KB

/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <asm/perf_event.h>

#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)

#ifdef CONFIG_KVM_ARM_PMU

struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
	u64 bitmask;
};

struct kvm_pmu {
	int irq_num;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	bool ready;
	bool created;
	bool irq_level;
};

#define kvm_arm_pmu_v3_ready(v)		((v)->arch.pmu.ready)
#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
bool kvm_arm_support_pmu_v3(void);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
#else
struct kvm_pmu {
};

#define kvm_arm_pmu_v3_ready(v)		(false)
#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline bool kvm_arm_support_pmu_v3(void) { return false; }
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

#endif
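
This header follows a common kernel pattern: the real PMU API is declared under CONFIG_KVM_ARM_PMU, while the #else branch provides static inline no-op stubs (empty bodies, 0, false, or -ENXIO), so callers can invoke the PMU hooks unconditionally and the compiler discards them when the feature is configured out. The snippet below is a minimal, self-contained sketch of that pattern outside the kernel; the names HAVE_WIDGET and widget_* are illustrative only and do not come from this header.

/*
 * Config-gated stub pattern, in miniature. With HAVE_WIDGET defined,
 * the real declarations are used (definitions would live in a separate
 * widget.c); without it, the static inline stubs make every call site
 * compile to safe defaults, with no #ifdefs at the call sites.
 */
#include <stdio.h>

/* #define HAVE_WIDGET 1 */	/* flip on to use a real implementation */

#ifdef HAVE_WIDGET
int widget_enable(int id);
int widget_read(int id);
#else
/* Stubs: keep callers building when the feature is configured out. */
static inline int widget_enable(int id) { (void)id; return 0; }
static inline int widget_read(int id)   { (void)id; return 0; }
#endif

int main(void)
{
	/* Call sites are identical in both configurations. */
	widget_enable(0);
	printf("widget 0 reads %d\n", widget_read(0));
	return 0;
}

In the header above, the same approach lets callers use functions such as kvm_pmu_flush_hwstate() and kvm_pmu_sync_hwstate() without guarding each call: with CONFIG_KVM_ARM_PMU disabled they collapse to empty inlines, and the attribute accessors simply report -ENXIO.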