power8-pmu.c

/*
 * Performance counter support for POWER8 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt)     "power8-pmu: " fmt

#include "isa207-common.h"

/*
 * Some power8 event codes.
 */
#define EVENT(_name, _code)     _name = _code,

enum {
#include "power8-events-list.h"
};

#undef EVENT
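
/*
 * Note: power8-events-list.h is a list of EVENT(name, code) entries.
 * With EVENT() defined as above, including that list inside the enum
 * turns each entry into an enum constant; e.g. an (illustrative, not an
 * actual POWER8 code) entry
 *
 *      EVENT(PM_SOME_EVENT, 0x12345)
 *
 * expands to "PM_SOME_EVENT = 0x12345,", keeping the event names and raw
 * codes defined in a single place.
 */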
/* MMCRA IFM bits - POWER8 */
#define POWER8_MMCRA_IFM1       0x0000000040000000UL
#define POWER8_MMCRA_IFM2       0x0000000080000000UL
#define POWER8_MMCRA_IFM3       0x00000000C0000000UL
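
/*
 * These are the BHRB instruction filtering mode (IFM) bits in MMCRA.
 * Only IFM1 is used in this file: power8_bhrb_filter_map() below returns
 * it for PERF_SAMPLE_BRANCH_ANY_CALL, and power8_config_bhrb() ORs the
 * chosen value into MMCRA.
 */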
/* Table of alternatives, sorted by column 0 */
static const unsigned int event_alternatives[][MAX_ALT] = {
        { PM_MRK_ST_CMPL,               PM_MRK_ST_CMPL_ALT },
        { PM_BR_MRK_2PATH,              PM_BR_MRK_2PATH_ALT },
        { PM_L3_CO_MEPF,                PM_L3_CO_MEPF_ALT },
        { PM_MRK_DATA_FROM_L2MISS,      PM_MRK_DATA_FROM_L2MISS_ALT },
        { PM_CMPLU_STALL_ALT,           PM_CMPLU_STALL },
        { PM_BR_2PATH,                  PM_BR_2PATH_ALT },
        { PM_INST_DISP,                 PM_INST_DISP_ALT },
        { PM_RUN_CYC_ALT,               PM_RUN_CYC },
        { PM_MRK_FILT_MATCH,            PM_MRK_FILT_MATCH_ALT },
        { PM_LD_MISS_L1,                PM_LD_MISS_L1_ALT },
        { PM_RUN_INST_CMPL_ALT,         PM_RUN_INST_CMPL },
};

/*
 * Scan the alternatives table for a match and return the
 * index into the alternatives table if found, else -1.
 */
static int find_alternative(u64 event)
{
        int i, j;

        for (i = 0; i < ARRAY_SIZE(event_alternatives); ++i) {
                if (event < event_alternatives[i][0])
                        break;

                for (j = 0; j < MAX_ALT && event_alternatives[i][j]; ++j)
                        if (event == event_alternatives[i][j])
                                return i;
        }

        return -1;
}
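
/*
 * The early break in the outer loop of find_alternative() relies on
 * event_alternatives[] being sorted by its first column: once the event
 * code is smaller than the first entry of a row, it cannot appear in that
 * row or in any later one.
 */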
static int power8_get_alternatives(u64 event, unsigned int flags, u64 alt[])
{
        int i, j, num_alt = 0;
        u64 alt_event;

        alt[num_alt++] = event;

        i = find_alternative(event);
        if (i >= 0) {
                /* Filter out the original event, it's already in alt[0] */
                for (j = 0; j < MAX_ALT; ++j) {
                        alt_event = event_alternatives[i][j];
                        if (alt_event && alt_event != event)
                                alt[num_alt++] = alt_event;
                }
        }

        if (flags & PPMU_ONLY_COUNT_RUN) {
                /*
                 * We're only counting in RUN state, so PM_CYC is equivalent to
                 * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
                 */
                j = num_alt;
                for (i = 0; i < num_alt; ++i) {
                        switch (alt[i]) {
                        case PM_CYC:
                                alt[j++] = PM_RUN_CYC;
                                break;
                        case PM_RUN_CYC:
                                alt[j++] = PM_CYC;
                                break;
                        case PM_INST_CMPL:
                                alt[j++] = PM_RUN_INST_CMPL;
                                break;
                        case PM_RUN_INST_CMPL:
                                alt[j++] = PM_INST_CMPL;
                                break;
                        }
                }
                num_alt = j;
        }

        return num_alt;
}
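
/*
 * Note that with PPMU_ONLY_COUNT_RUN the alternatives list can grow past
 * MAX_ALT, because an event also picks up its RUN-state twin; this is
 * presumably why .max_alternatives is set to MAX_ALT + 1 in power8_pmu
 * below.
 */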
GENERIC_EVENT_ATTR(cpu-cycles,                  PM_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-frontend,     PM_GCT_NOSLOT_CYC);
GENERIC_EVENT_ATTR(stalled-cycles-backend,      PM_CMPLU_STALL);
GENERIC_EVENT_ATTR(instructions,                PM_INST_CMPL);
GENERIC_EVENT_ATTR(branch-instructions,         PM_BRU_FIN);
GENERIC_EVENT_ATTR(branch-misses,               PM_BR_MPRED_CMPL);
GENERIC_EVENT_ATTR(cache-references,            PM_LD_REF_L1);
GENERIC_EVENT_ATTR(cache-misses,                PM_LD_MISS_L1);

CACHE_EVENT_ATTR(L1-dcache-load-misses,         PM_LD_MISS_L1);
CACHE_EVENT_ATTR(L1-dcache-loads,               PM_LD_REF_L1);
CACHE_EVENT_ATTR(L1-dcache-prefetches,          PM_L1_PREF);
CACHE_EVENT_ATTR(L1-dcache-store-misses,        PM_ST_MISS_L1);
CACHE_EVENT_ATTR(L1-icache-load-misses,         PM_L1_ICACHE_MISS);
CACHE_EVENT_ATTR(L1-icache-loads,               PM_INST_FROM_L1);
CACHE_EVENT_ATTR(L1-icache-prefetches,          PM_IC_PREF_WRITE);
CACHE_EVENT_ATTR(LLC-load-misses,               PM_DATA_FROM_L3MISS);
CACHE_EVENT_ATTR(LLC-loads,                     PM_DATA_FROM_L3);
CACHE_EVENT_ATTR(LLC-prefetches,                PM_L3_PREF_ALL);
CACHE_EVENT_ATTR(LLC-store-misses,              PM_L2_ST_MISS);
CACHE_EVENT_ATTR(LLC-stores,                    PM_L2_ST);
CACHE_EVENT_ATTR(branch-load-misses,            PM_BR_MPRED_CMPL);
CACHE_EVENT_ATTR(branch-loads,                  PM_BRU_FIN);
CACHE_EVENT_ATTR(dTLB-load-misses,              PM_DTLB_MISS);
CACHE_EVENT_ATTR(iTLB-load-misses,              PM_ITLB_MISS);

static struct attribute *power8_events_attr[] = {
        GENERIC_EVENT_PTR(PM_CYC),
        GENERIC_EVENT_PTR(PM_GCT_NOSLOT_CYC),
        GENERIC_EVENT_PTR(PM_CMPLU_STALL),
        GENERIC_EVENT_PTR(PM_INST_CMPL),
        GENERIC_EVENT_PTR(PM_BRU_FIN),
        GENERIC_EVENT_PTR(PM_BR_MPRED_CMPL),
        GENERIC_EVENT_PTR(PM_LD_REF_L1),
        GENERIC_EVENT_PTR(PM_LD_MISS_L1),

        CACHE_EVENT_PTR(PM_LD_MISS_L1),
        CACHE_EVENT_PTR(PM_LD_REF_L1),
        CACHE_EVENT_PTR(PM_L1_PREF),
        CACHE_EVENT_PTR(PM_ST_MISS_L1),
        CACHE_EVENT_PTR(PM_L1_ICACHE_MISS),
        CACHE_EVENT_PTR(PM_INST_FROM_L1),
        CACHE_EVENT_PTR(PM_IC_PREF_WRITE),
        CACHE_EVENT_PTR(PM_DATA_FROM_L3MISS),
        CACHE_EVENT_PTR(PM_DATA_FROM_L3),
        CACHE_EVENT_PTR(PM_L3_PREF_ALL),
        CACHE_EVENT_PTR(PM_L2_ST_MISS),
        CACHE_EVENT_PTR(PM_L2_ST),
        CACHE_EVENT_PTR(PM_BR_MPRED_CMPL),
        CACHE_EVENT_PTR(PM_BRU_FIN),
        CACHE_EVENT_PTR(PM_DTLB_MISS),
        CACHE_EVENT_PTR(PM_ITLB_MISS),
        NULL
};

static struct attribute_group power8_pmu_events_group = {
        .name = "events",
        .attrs = power8_events_attr,
};

PMU_FORMAT_ATTR(event,          "config:0-49");
PMU_FORMAT_ATTR(pmcxsel,        "config:0-7");
PMU_FORMAT_ATTR(mark,           "config:8");
PMU_FORMAT_ATTR(combine,        "config:11");
PMU_FORMAT_ATTR(unit,           "config:12-15");
PMU_FORMAT_ATTR(pmc,            "config:16-19");
PMU_FORMAT_ATTR(cache_sel,      "config:20-23");
PMU_FORMAT_ATTR(sample_mode,    "config:24-28");
PMU_FORMAT_ATTR(thresh_sel,     "config:29-31");
PMU_FORMAT_ATTR(thresh_stop,    "config:32-35");
PMU_FORMAT_ATTR(thresh_start,   "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,     "config:40-49");

static struct attribute *power8_pmu_format_attr[] = {
        &format_attr_event.attr,
        &format_attr_pmcxsel.attr,
        &format_attr_mark.attr,
        &format_attr_combine.attr,
        &format_attr_unit.attr,
        &format_attr_pmc.attr,
        &format_attr_cache_sel.attr,
        &format_attr_sample_mode.attr,
        &format_attr_thresh_sel.attr,
        &format_attr_thresh_stop.attr,
        &format_attr_thresh_start.attr,
        &format_attr_thresh_cmp.attr,
        NULL,
};

static struct attribute_group power8_pmu_format_group = {
        .name = "format",
        .attrs = power8_pmu_format_attr,
};

static const struct attribute_group *power8_pmu_attr_groups[] = {
        &power8_pmu_format_group,
        &power8_pmu_events_group,
        NULL,
};
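
/*
 * When the PMU is registered these attribute groups appear in sysfs
 * (typically under /sys/bus/event_source/devices/<pmu>/events and
 * .../format), so userspace tools such as perf can resolve the symbolic
 * event names above and know how the fields of a raw config value are
 * laid out.
 */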
static int power8_generic_events[] = {
        [PERF_COUNT_HW_CPU_CYCLES] =                    PM_CYC,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] =       PM_GCT_NOSLOT_CYC,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] =        PM_CMPLU_STALL,
        [PERF_COUNT_HW_INSTRUCTIONS] =                  PM_INST_CMPL,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] =           PM_BRU_FIN,
        [PERF_COUNT_HW_BRANCH_MISSES] =                 PM_BR_MPRED_CMPL,
        [PERF_COUNT_HW_CACHE_REFERENCES] =              PM_LD_REF_L1,
        [PERF_COUNT_HW_CACHE_MISSES] =                  PM_LD_MISS_L1,
};

static u64 power8_bhrb_filter_map(u64 branch_sample_type)
{
        u64 pmu_bhrb_filter = 0;

        /* BHRB and regular PMU events share the same privilege state
         * filter configuration. BHRB is always recorded along with a
         * regular PMU event. As the privilege state filter is handled
         * in the basic PMC configuration of the accompanying regular
         * PMU event, we ignore any separate BHRB specific request.
         */

        /* No branch filter requested */
        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY)
                return pmu_bhrb_filter;

        /* Invalid branch filter options - HW does not support */
        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_RETURN)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_IND_CALL)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_CALL)
                return -1;

        if (branch_sample_type & PERF_SAMPLE_BRANCH_ANY_CALL) {
                pmu_bhrb_filter |= POWER8_MMCRA_IFM1;
                return pmu_bhrb_filter;
        }
        /* Everything else is unsupported */
        return -1;
}
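
/*
 * Return value convention for the filter map above: 0 means record every
 * branch (no filtering), -1 means the requested filter cannot be provided
 * by the hardware, and any other value is the set of MMCRA IFM bits that
 * power8_config_bhrb() ORs into MMCRA.
 */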
static void power8_config_bhrb(u64 pmu_bhrb_filter)
{
        /* Enable BHRB filter in PMU */
        mtspr(SPRN_MMCRA, (mfspr(SPRN_MMCRA) | pmu_bhrb_filter));
}

#define C(x)    PERF_COUNT_HW_CACHE_##x

/*
 * Table of generalized cache-related events.
 * 0 means not supported, -1 means nonsensical, other values
 * are event codes.
 */
static int power8_cache_events[C(MAX)][C(OP_MAX)][C(RESULT_MAX)] = {
        [ C(L1D) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_LD_REF_L1,
                        [ C(RESULT_MISS) ] = PM_LD_MISS_L1,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = 0,
                        [ C(RESULT_MISS) ] = PM_ST_MISS_L1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L1_PREF,
                        [ C(RESULT_MISS) ] = 0,
                },
        },
        [ C(L1I) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_INST_FROM_L1,
                        [ C(RESULT_MISS) ] = PM_L1_ICACHE_MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L1_DEMAND_WRITE,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = PM_IC_PREF_WRITE,
                        [ C(RESULT_MISS) ] = 0,
                },
        },
        [ C(LL) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_DATA_FROM_L3,
                        [ C(RESULT_MISS) ] = PM_DATA_FROM_L3MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L2_ST,
                        [ C(RESULT_MISS) ] = PM_L2_ST_MISS,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = PM_L3_PREF_ALL,
                        [ C(RESULT_MISS) ] = 0,
                },
        },
        [ C(DTLB) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = 0,
                        [ C(RESULT_MISS) ] = PM_DTLB_MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
        [ C(ITLB) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = 0,
                        [ C(RESULT_MISS) ] = PM_ITLB_MISS,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
        [ C(BPU) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = PM_BRU_FIN,
                        [ C(RESULT_MISS) ] = PM_BR_MPRED_CMPL,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
        [ C(NODE) ] = {
                [ C(OP_READ) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_WRITE) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
                [ C(OP_PREFETCH) ] = {
                        [ C(RESULT_ACCESS) ] = -1,
                        [ C(RESULT_MISS) ] = -1,
                },
        },
};

#undef C
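
/*
 * The three indices of the table above follow perf's generalized cache
 * event triplet (PERF_COUNT_HW_CACHE_*, *_OP_* and *_RESULT_* from
 * include/uapi/linux/perf_event.h); the core code uses this table to turn
 * a generalized cache event request into a raw POWER8 event code.
 */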
static struct power_pmu power8_pmu = {
        .name                   = "POWER8",
        .n_counter              = MAX_PMU_COUNTERS,
        .max_alternatives       = MAX_ALT + 1,
        .add_fields             = ISA207_ADD_FIELDS,
        .test_adder             = ISA207_TEST_ADDER,
        .compute_mmcr           = isa207_compute_mmcr,
        .config_bhrb            = power8_config_bhrb,
        .bhrb_filter_map        = power8_bhrb_filter_map,
        .get_constraint         = isa207_get_constraint,
        .get_alternatives       = power8_get_alternatives,
        .disable_pmc            = isa207_disable_pmc,
        .flags                  = PPMU_HAS_SIER | PPMU_ARCH_207S,
        .n_generic              = ARRAY_SIZE(power8_generic_events),
        .generic_events         = power8_generic_events,
        .cache_events           = &power8_cache_events,
        .attr_groups            = power8_pmu_attr_groups,
        .bhrb_nr                = 32,
};

static int __init init_power8_pmu(void)
{
        int rc;

        if (!cur_cpu_spec->oprofile_cpu_type ||
            strcmp(cur_cpu_spec->oprofile_cpu_type, "ppc64/power8"))
                return -ENODEV;

        rc = register_power_pmu(&power8_pmu);
        if (rc)
                return rc;

        /* Tell userspace that EBB is supported */
        cur_cpu_spec->cpu_user_features2 |= PPC_FEATURE2_EBB;

        if (cpu_has_feature(CPU_FTR_PMAO_BUG))
                pr_info("PMAO restore workaround active.\n");

        return 0;
}
early_initcall(init_power8_pmu);
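
/*
 * Note on registration: the oprofile_cpu_type check makes this early
 * initcall take effect only on CPUs that identify themselves as
 * "ppc64/power8". PPC_FEATURE2_EBB (advertised to userspace through the
 * AT_HWCAP2 auxiliary vector entry) is set only after register_power_pmu()
 * succeeds, since Event-Based Branches rely on a working PMU.
 */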