perf_event.h

/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 SiFive
 * Copyright (C) 2018 Andes Technology Corporation
 *
 */

#ifndef _ASM_RISCV_PERF_EVENT_H
#define _ASM_RISCV_PERF_EVENT_H

#include <linux/perf_event.h>
#include <linux/ptrace.h>
#include <linux/interrupt.h>

#define RISCV_BASE_COUNTERS	2

/*
 * The RISCV_MAX_COUNTERS parameter should be specified.
 */

#ifdef CONFIG_RISCV_BASE_PMU
#define RISCV_MAX_COUNTERS	2
#endif

#ifndef RISCV_MAX_COUNTERS
#error "Please provide a valid RISCV_MAX_COUNTERS for the PMU."
#endif
/*
 * These are the indexes of bits in the counteren register *minus* 1,
 * except for cycle.  It would be coherent if they could be mapped
 * directly onto the counteren bit definitions, but there is a *time*
 * register at counteren[1].  Per-cpu structure space is a scarce
 * resource here.
 *
 * According to the spec, an implementation can support counters up to
 * mhpmcounter31, but many high-end processors have at most 6 general
 * PMCs, so we only give definitions up to MHPMCOUNTER8 here.
 */
#define RISCV_PMU_CYCLE		0
#define RISCV_PMU_INSTRET	1
#define RISCV_PMU_MHPMCOUNTER3	2
#define RISCV_PMU_MHPMCOUNTER4	3
#define RISCV_PMU_MHPMCOUNTER5	4
#define RISCV_PMU_MHPMCOUNTER6	5
#define RISCV_PMU_MHPMCOUNTER7	6
#define RISCV_PMU_MHPMCOUNTER8	7
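
/*
 * Illustration (not part of the original header): because counteren[1]
 * is the *time* bit, the counteren bit for one of the indexes above is
 * idx + 1 for everything except cycle.  A hypothetical helper for that
 * mapping would look like:
 *
 *	static inline int riscv_pmu_counteren_bit(int idx)
 *	{
 *		return idx == RISCV_PMU_CYCLE ? idx : idx + 1;
 *	}
 *
 * e.g. RISCV_PMU_INSTRET (1) maps to counteren[2] (IR) and
 * RISCV_PMU_MHPMCOUNTER3 (2) maps to counteren[3] (HPM3).
 */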
#define RISCV_OP_UNSUPP		(-EOPNOTSUPP)

struct cpu_hw_events {
	/* # currently enabled events */
	int			n_events;
	/* currently enabled events */
	struct perf_event	*events[RISCV_MAX_COUNTERS];
	/* vendor-defined PMU data */
	void			*platform;
};
struct riscv_pmu {
	struct pmu	*pmu;

	/* generic hw/cache events table */
	const int	*hw_events;
	const int	(*cache_events)[PERF_COUNT_HW_CACHE_MAX]
				       [PERF_COUNT_HW_CACHE_OP_MAX]
				       [PERF_COUNT_HW_CACHE_RESULT_MAX];
	/* methods used to map hw/cache events */
	int		(*map_hw_event)(u64 config);
	int		(*map_cache_event)(u64 config);

	/* max generic hw events in map */
	int		max_events;
	/* number of total counters, 2 (base) + x (general) */
	int		num_counters;
	/* the width of the counter */
	int		counter_width;

	/* vendor-defined PMU features */
	void		*platform;

	irqreturn_t	(*handle_irq)(int irq_num, void *dev);
	int		irq;
};
#endif /* _ASM_RISCV_PERF_EVENT_H */
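
As a quick illustration of how the hooks in struct riscv_pmu fit together, the sketch below shows a minimal back-end description a port could provide, covering only the event-mapping pieces and the two base counters. It is an assumption-laden example, not code from the kernel sources: the names my_hw_event_map, my_map_hw_event and my_riscv_pmu are invented, the counter width of 63 bits is assumed, and no overflow interrupt is wired up.

#include <linux/kernel.h>	/* ARRAY_SIZE() */
#include <asm/perf_event.h>	/* the header above */

/* Map the generic perf hardware event ids onto counter indexes;
 * anything outside the table is reported as unsupported. */
static const int my_hw_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES]	= RISCV_PMU_CYCLE,
	[PERF_COUNT_HW_INSTRUCTIONS]	= RISCV_PMU_INSTRET,
};

static int my_map_hw_event(u64 config)
{
	if (config >= ARRAY_SIZE(my_hw_event_map))
		return RISCV_OP_UNSUPP;
	return my_hw_event_map[config];
}

static struct riscv_pmu my_riscv_pmu = {
	.hw_events	= my_hw_event_map,
	.map_hw_event	= my_map_hw_event,
	.max_events	= ARRAY_SIZE(my_hw_event_map),
	.num_counters	= RISCV_BASE_COUNTERS,	/* cycle + instret only */
	.counter_width	= 63,			/* assumed counter width */
	.irq		= -1,			/* no overflow interrupt */
};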