// SPDX-License-Identifier: GPL-2.0
#include <inttypes.h>
#include <math.h>
#include <linux/compiler.h>

#include "../util/hist.h"
#include "../util/util.h"
#include "../util/sort.h"
#include "../util/evsel.h"
#include "../util/evlist.h"

/* hist period print (hpp) functions */

#define hpp__call_print_fn(hpp, fn, fmt, ...)			\
({								\
	int __ret = fn(hpp, fmt, ##__VA_ARGS__);		\
	advance_hpp(hpp, __ret);				\
	__ret;							\
})

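/*
 * Format one field of a hist entry, either as a percentage of the total
 * period or as a raw value.  For grouped events, one column is emitted
 * per group member in group-index order; members without a sample for
 * this entry are zero-filled so the columns stay aligned.
 */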
static int __hpp__fmt(struct perf_hpp *hpp, struct hist_entry *he,
		      hpp_field_fn get_field, const char *fmt, int len,
		      hpp_snprint_fn print_fn, bool fmt_percent)
{
	int ret;
	struct hists *hists = he->hists;
	struct perf_evsel *evsel = hists_to_evsel(hists);
	char *buf = hpp->buf;
	size_t size = hpp->size;

	if (fmt_percent) {
		double percent = 0.0;
		u64 total = hists__total_period(hists);

		if (total)
			percent = 100.0 * get_field(he) / total;

		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, percent);
	} else
		ret = hpp__call_print_fn(hpp, print_fn, fmt, len, get_field(he));

	if (perf_evsel__is_group_event(evsel)) {
		int prev_idx, idx_delta;
		struct hist_entry *pair;
		int nr_members = evsel->nr_members;

		prev_idx = perf_evsel__group_idx(evsel);

		list_for_each_entry(pair, &he->pairs.head, pairs.node) {
			u64 period = get_field(pair);
			u64 total = hists__total_period(pair->hists);

			if (!total)
				continue;

			evsel = hists_to_evsel(pair->hists);
			idx_delta = perf_evsel__group_idx(evsel) - prev_idx - 1;

			while (idx_delta--) {
				/*
				 * zero-fill group members in the middle which
				 * have no sample
				 */
				if (fmt_percent) {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0.0);
				} else {
					ret += hpp__call_print_fn(hpp, print_fn,
								  fmt, len, 0ULL);
				}
			}

			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn, fmt, len,
							  100.0 * period / total);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn, fmt,
							  len, period);
			}

			prev_idx = perf_evsel__group_idx(evsel);
		}

		idx_delta = nr_members - prev_idx - 1;

		while (idx_delta--) {
			/*
			 * zero-fill group members at last which have no sample
			 */
			if (fmt_percent) {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0.0);
			} else {
				ret += hpp__call_print_fn(hpp, print_fn,
							  fmt, len, 0ULL);
			}
		}
	}

	/*
	 * Restore original buf and size as it's where caller expects
	 * the result will be saved.
	 */
	hpp->buf = buf;
	hpp->size = size;
	return ret;
}

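/*
 * hpp__fmt() picks the column width (user override or default) before
 * delegating to __hpp__fmt(); with a field separator the width collapses
 * to 1, otherwise room is reserved for the leading space and, for
 * percentages, the trailing '%'.  hpp__fmt_acc() is the accumulated
 * variant and prints "N/A" when callchain cumulation is disabled.
 */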
int hpp__fmt(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
	     struct hist_entry *he, hpp_field_fn get_field,
	     const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	int len = fmt->user_len ?: fmt->len;

	if (symbol_conf.field_sep) {
		return __hpp__fmt(hpp, he, get_field, fmtstr, 1,
				  print_fn, fmt_percent);
	}

	if (fmt_percent)
		len -= 2; /* 2 for a space and a % sign */
	else
		len -= 1;

	return __hpp__fmt(hpp, he, get_field, fmtstr, len, print_fn, fmt_percent);
}

int hpp__fmt_acc(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
		 struct hist_entry *he, hpp_field_fn get_field,
		 const char *fmtstr, hpp_snprint_fn print_fn, bool fmt_percent)
{
	if (!symbol_conf.cumulate_callchain) {
		int len = fmt->user_len ?: fmt->len;

		return snprintf(hpp->buf, hpp->size, " %*s", len - 1, "N/A");
	}

	return hpp__fmt(fmt, hpp, he, get_field, fmtstr, print_fn, fmt_percent);
}

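/*
 * Sorting helpers.  __hpp__sort() orders two entries by the given field;
 * when that compares equal and event groups are active, it falls back to
 * comparing the same field across the group members, in group-index
 * order, until one pair differs.
 */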
static int field_cmp(u64 field_a, u64 field_b)
{
	if (field_a > field_b)
		return 1;
	if (field_a < field_b)
		return -1;
	return 0;
}

static int64_t __hpp__sort(struct hist_entry *a, struct hist_entry *b,
			   hpp_field_fn get_field)
{
	s64 ret;
	int i, nr_members;
	struct perf_evsel *evsel;
	struct hist_entry *pair;
	u64 *fields_a, *fields_b;

	ret = field_cmp(get_field(a), get_field(b));
	if (ret || !symbol_conf.event_group)
		return ret;

	evsel = hists_to_evsel(a->hists);
	if (!perf_evsel__is_group_event(evsel))
		return ret;

	nr_members = evsel->nr_members;
	fields_a = calloc(nr_members, sizeof(*fields_a));
	fields_b = calloc(nr_members, sizeof(*fields_b));

	if (!fields_a || !fields_b)
		goto out;

	list_for_each_entry(pair, &a->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_a[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	list_for_each_entry(pair, &b->pairs.head, pairs.node) {
		evsel = hists_to_evsel(pair->hists);
		fields_b[perf_evsel__group_idx(evsel)] = get_field(pair);
	}

	for (i = 1; i < nr_members; i++) {
		ret = field_cmp(fields_a[i], fields_b[i]);
		if (ret)
			break;
	}

out:
	free(fields_a);
	free(fields_b);
	return ret;
}

static int64_t __hpp__sort_acc(struct hist_entry *a, struct hist_entry *b,
			       hpp_field_fn get_field)
{
	s64 ret = 0;

	if (symbol_conf.cumulate_callchain) {
		/*
		 * Put caller above callee when they have equal period.
		 */
		ret = field_cmp(get_field(a), get_field(b));
		if (ret)
			return ret;

		if (a->thread != b->thread || !hist_entry__has_callchains(a) ||
		    !symbol_conf.use_callchain)
			return 0;

		ret = b->callchain->max_depth - a->callchain->max_depth;
		if (callchain_param.order == ORDER_CALLER)
			ret = -ret;
	}
	return ret;
}

static int hpp__width_fn(struct perf_hpp_fmt *fmt,
			 struct perf_hpp *hpp __maybe_unused,
			 struct hists *hists)
{
	int len = fmt->user_len ?: fmt->len;
	struct perf_evsel *evsel = hists_to_evsel(hists);

	if (symbol_conf.event_group)
		len = max(len, evsel->nr_members * fmt->len);

	if (len < (int)strlen(fmt->name))
		len = strlen(fmt->name);

	return len;
}

static int hpp__header_fn(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
			  struct hists *hists, int line __maybe_unused,
			  int *span __maybe_unused)
{
	int len = hpp__width_fn(fmt, hpp, hists);

	return scnprintf(hpp->buf, hpp->size, "%*s", len, fmt->name);
}

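/*
 * Print callbacks invoked via hpp__fmt(): hpp_color_scnprintf() pulls
 * the field width and percentage back out of the va_list in order to
 * colorize the value, while hpp_entry_scnprintf() hands the whole
 * va_list to vsnprintf().  Both clamp the return value to the remaining
 * buffer size so advance_hpp() never runs past hpp->buf.
 */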
int hpp_color_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	double percent;
	int ret, len;

	va_start(args, fmt);
	len = va_arg(args, int);
	percent = va_arg(args, double);
	ret = percent_color_len_snprintf(hpp->buf, hpp->size, fmt, len, percent);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

static int hpp_entry_scnprintf(struct perf_hpp *hpp, const char *fmt, ...)
{
	va_list args;
	ssize_t ssize = hpp->size;
	int ret;

	va_start(args, fmt);
	ret = vsnprintf(hpp->buf, hpp->size, fmt, args);
	va_end(args);

	return (ret >= ssize) ? (ssize - 1) : ret;
}

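/*
 * Each HPP_*_FNS() invocation below expands to the full set of helpers
 * for one column: a field accessor (he_get_*), color and/or plain entry
 * printers, and a sort callback.  The _type token names the column, the
 * _field token names the member of struct he_stat it reads.
 */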
#define __HPP_COLOR_PERCENT_FN(_type, _field)					\
static u64 he_get_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_PERCENT_FN(_type, _field)					\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_##_field, " %*.2f%%",		\
			hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_FN(_type, _field)						\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_##_field);				\
}

#define __HPP_COLOR_ACC_PERCENT_FN(_type, _field)				\
static u64 he_get_acc_##_field(struct hist_entry *he)				\
{										\
	return he->stat_acc->_field;						\
}										\
										\
static int hpp__color_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_color_scnprintf, true);				\
}

#define __HPP_ENTRY_ACC_PERCENT_FN(_type, _field)				\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt_acc(fmt, hpp, he, he_get_acc_##_field, " %*.2f%%",	\
			    hpp_entry_scnprintf, true);				\
}

#define __HPP_SORT_ACC_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort_acc(a, b, he_get_acc_##_field);			\
}

#define __HPP_ENTRY_RAW_FN(_type, _field)					\
static u64 he_get_raw_##_field(struct hist_entry *he)				\
{										\
	return he->stat._field;							\
}										\
										\
static int hpp__entry_##_type(struct perf_hpp_fmt *fmt,			\
			      struct perf_hpp *hpp, struct hist_entry *he)	\
{										\
	return hpp__fmt(fmt, hpp, he, he_get_raw_##_field, " %*"PRIu64,	\
			hpp_entry_scnprintf, false);				\
}

#define __HPP_SORT_RAW_FN(_type, _field)					\
static int64_t hpp__sort_##_type(struct perf_hpp_fmt *fmt __maybe_unused,	\
				 struct hist_entry *a, struct hist_entry *b)	\
{										\
	return __hpp__sort(a, b, he_get_raw_##_field);				\
}

#define HPP_PERCENT_FNS(_type, _field)		\
__HPP_COLOR_PERCENT_FN(_type, _field)		\
__HPP_ENTRY_PERCENT_FN(_type, _field)		\
__HPP_SORT_FN(_type, _field)

#define HPP_PERCENT_ACC_FNS(_type, _field)	\
__HPP_COLOR_ACC_PERCENT_FN(_type, _field)	\
__HPP_ENTRY_ACC_PERCENT_FN(_type, _field)	\
__HPP_SORT_ACC_FN(_type, _field)

#define HPP_RAW_FNS(_type, _field)		\
__HPP_ENTRY_RAW_FN(_type, _field)		\
__HPP_SORT_RAW_FN(_type, _field)

HPP_PERCENT_FNS(overhead, period)
HPP_PERCENT_FNS(overhead_sys, period_sys)
HPP_PERCENT_FNS(overhead_us, period_us)
HPP_PERCENT_FNS(overhead_guest_sys, period_guest_sys)
HPP_PERCENT_FNS(overhead_guest_us, period_guest_us)
HPP_PERCENT_ACC_FNS(overhead_acc, period)

HPP_RAW_FNS(samples, nr_events)
HPP_RAW_FNS(period, period)

static int64_t hpp__nop_cmp(struct perf_hpp_fmt *fmt __maybe_unused,
			    struct hist_entry *a __maybe_unused,
			    struct hist_entry *b __maybe_unused)
{
	return 0;
}

static bool perf_hpp__is_hpp_entry(struct perf_hpp_fmt *a)
{
	return a->header == hpp__header_fn;
}

static bool hpp__equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	if (!perf_hpp__is_hpp_entry(a) || !perf_hpp__is_hpp_entry(b))
		return false;

	return a->idx == b->idx;
}

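/*
 * Initializers for the built-in perf_hpp__format[] entries below.  All
 * columns share the same header/width helpers and a no-op cmp/collapse
 * that treats any two entries as equal; only the COLOR variants hook up
 * a .color callback in addition to .entry.
 */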
#define HPP__COLOR_PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name	= _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.color	= hpp__color_ ## _fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

#define HPP__COLOR_ACC_PRINT_FNS(_name, _fn, _idx)	\
	{						\
		.name	= _name,			\
		.header	= hpp__header_fn,		\
		.width	= hpp__width_fn,		\
		.color	= hpp__color_ ## _fn,		\
		.entry	= hpp__entry_ ## _fn,		\
		.cmp	= hpp__nop_cmp,			\
		.collapse = hpp__nop_cmp,		\
		.sort	= hpp__sort_ ## _fn,		\
		.idx	= PERF_HPP__ ## _idx,		\
		.equal	= hpp__equal,			\
	}

#define HPP__PRINT_FNS(_name, _fn, _idx)	\
	{					\
		.name	= _name,		\
		.header	= hpp__header_fn,	\
		.width	= hpp__width_fn,	\
		.entry	= hpp__entry_ ## _fn,	\
		.cmp	= hpp__nop_cmp,		\
		.collapse = hpp__nop_cmp,	\
		.sort	= hpp__sort_ ## _fn,	\
		.idx	= PERF_HPP__ ## _idx,	\
		.equal	= hpp__equal,		\
	}

struct perf_hpp_fmt perf_hpp__format[] = {
	HPP__COLOR_PRINT_FNS("Overhead", overhead, OVERHEAD),
	HPP__COLOR_PRINT_FNS("sys", overhead_sys, OVERHEAD_SYS),
	HPP__COLOR_PRINT_FNS("usr", overhead_us, OVERHEAD_US),
	HPP__COLOR_PRINT_FNS("guest sys", overhead_guest_sys, OVERHEAD_GUEST_SYS),
	HPP__COLOR_PRINT_FNS("guest usr", overhead_guest_us, OVERHEAD_GUEST_US),
	HPP__COLOR_ACC_PRINT_FNS("Children", overhead_acc, OVERHEAD_ACC),
	HPP__PRINT_FNS("Samples", samples, SAMPLES),
	HPP__PRINT_FNS("Period", period, PERIOD)
};

struct perf_hpp_list perf_hpp_list = {
	.fields	= LIST_HEAD_INIT(perf_hpp_list.fields),
	.sorts	= LIST_HEAD_INIT(perf_hpp_list.sorts),
	.nr_header_lines = 1,
};

#undef HPP__COLOR_PRINT_FNS
#undef HPP__COLOR_ACC_PRINT_FNS
#undef HPP__PRINT_FNS

#undef HPP_PERCENT_FNS
#undef HPP_PERCENT_ACC_FNS
#undef HPP_RAW_FNS

#undef __HPP_HEADER_FN
#undef __HPP_WIDTH_FN
#undef __HPP_COLOR_PERCENT_FN
#undef __HPP_ENTRY_PERCENT_FN
#undef __HPP_COLOR_ACC_PERCENT_FN
#undef __HPP_ENTRY_ACC_PERCENT_FN
#undef __HPP_ENTRY_RAW_FN
#undef __HPP_SORT_FN
#undef __HPP_SORT_ACC_FN
#undef __HPP_SORT_RAW_FN

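/*
 * Register the default output columns.  When the user gave an explicit
 * field order nothing is added here; otherwise the column set depends
 * on the current symbol_conf switches (callchain cumulation, CPU
 * utilization, sample counts, total period).
 */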
void perf_hpp__init(void)
{
	int i;

	for (i = 0; i < PERF_HPP__MAX_INDEX; i++) {
		struct perf_hpp_fmt *fmt = &perf_hpp__format[i];

		INIT_LIST_HEAD(&fmt->list);

		/* sort_list may be linked by setup_sorting() */
		if (fmt->sort_list.next == NULL)
			INIT_LIST_HEAD(&fmt->sort_list);
	}

	/*
	 * If user specified field order, no need to setup default fields.
	 */
	if (is_strict_order(field_order))
		return;

	if (symbol_conf.cumulate_callchain) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_ACC);
		perf_hpp__format[PERF_HPP__OVERHEAD].name = "Self";
	}

	hpp_dimension__add_output(PERF_HPP__OVERHEAD);

	if (symbol_conf.show_cpu_utilization) {
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_SYS);
		hpp_dimension__add_output(PERF_HPP__OVERHEAD_US);

		if (perf_guest) {
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_SYS);
			hpp_dimension__add_output(PERF_HPP__OVERHEAD_GUEST_US);
		}
	}

	if (symbol_conf.show_nr_samples)
		hpp_dimension__add_output(PERF_HPP__SAMPLES);

	if (symbol_conf.show_total_period)
		hpp_dimension__add_output(PERF_HPP__PERIOD);
}

void perf_hpp_list__column_register(struct perf_hpp_list *list,
				    struct perf_hpp_fmt *format)
{
	list_add_tail(&format->list, &list->fields);
}

void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
					struct perf_hpp_fmt *format)
{
	list_add_tail(&format->sort_list, &list->sorts);
}

void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
				       struct perf_hpp_fmt *format)
{
	list_add(&format->sort_list, &list->sorts);
}

void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
{
	list_del_init(&format->list);
}

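/*
 * Undo the effect of callchain cumulation on the output columns: the
 * accumulated "Children" column is removed and the overhead column gets
 * its plain "Overhead" name back (perf_hpp__init() renamed it "Self").
 */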
void perf_hpp__cancel_cumulate(void)
{
	struct perf_hpp_fmt *fmt, *acc, *ovh, *tmp;

	if (is_strict_order(field_order))
		return;

	ovh = &perf_hpp__format[PERF_HPP__OVERHEAD];
	acc = &perf_hpp__format[PERF_HPP__OVERHEAD_ACC];

	perf_hpp_list__for_each_format_safe(&perf_hpp_list, fmt, tmp) {
		if (acc->equal(acc, fmt)) {
			perf_hpp__column_unregister(fmt);
			continue;
		}

		if (ovh->equal(ovh, fmt))
			fmt->name = "Overhead";
	}
}

static bool fmt_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
{
	return a->equal && a->equal(a, b);
}

void perf_hpp__setup_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append sort keys to output field */
	perf_hpp_list__for_each_sort_list(list, fmt) {
		struct perf_hpp_fmt *pos;

		/* skip sort-only fields ("sort_compute" in perf diff) */
		if (!fmt->entry && !fmt->color)
			continue;

		perf_hpp_list__for_each_format(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__column_register(fmt);
next:
		continue;
	}
}

void perf_hpp__append_sort_keys(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt;

	/* append output fields to sort keys */
	perf_hpp_list__for_each_format(list, fmt) {
		struct perf_hpp_fmt *pos;

		perf_hpp_list__for_each_sort_list(list, pos) {
			if (fmt_equal(fmt, pos))
				goto next;
		}

		perf_hpp__register_sort_field(fmt);
next:
		continue;
	}
}

static void fmt_free(struct perf_hpp_fmt *fmt)
{
	/*
	 * At this point fmt should be completely
	 * unhooked, if not it's a bug.
	 */
	BUG_ON(!list_empty(&fmt->list));
	BUG_ON(!list_empty(&fmt->sort_list));

	if (fmt->free)
		fmt->free(fmt);
}

void perf_hpp__reset_output_field(struct perf_hpp_list *list)
{
	struct perf_hpp_fmt *fmt, *tmp;

	/* reset output fields */
	perf_hpp_list__for_each_format_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}

	/* reset sort keys */
	perf_hpp_list__for_each_sort_list_safe(list, fmt, tmp) {
		list_del_init(&fmt->list);
		list_del_init(&fmt->sort_list);
		fmt_free(fmt);
	}
}

/*
 * See hists__fprintf to match the column widths
 */
unsigned int hists__sort_list_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	if (verbose > 0 && hists__has(hists, sym)) /* Addr + origin */
		ret += 3 + BITS_PER_LONG / 4;

	return ret;
}

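/*
 * Width of the leading hpp columns only: iteration stops at the first
 * sort or dynamic entry, which marks the end of the overhead block.
 */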
unsigned int hists__overhead_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	int ret = 0;
	bool first = true;
	struct perf_hpp dummy_hpp;

	hists__for_each_format(hists, fmt) {
		if (perf_hpp__is_sort_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
			break;

		if (first)
			first = false;
		else
			ret += 2;

		ret += fmt->width(fmt, &dummy_hpp, hists);
	}

	return ret;
}

void perf_hpp__reset_width(struct perf_hpp_fmt *fmt, struct hists *hists)
{
	if (perf_hpp__is_sort_entry(fmt))
		return perf_hpp__reset_sort_width(fmt, hists);

	if (perf_hpp__is_dynamic_entry(fmt))
		return;

	BUG_ON(fmt->idx >= PERF_HPP__MAX_INDEX);

	switch (fmt->idx) {
	case PERF_HPP__OVERHEAD:
	case PERF_HPP__OVERHEAD_SYS:
	case PERF_HPP__OVERHEAD_US:
	case PERF_HPP__OVERHEAD_ACC:
		fmt->len = 8;
		break;

	case PERF_HPP__OVERHEAD_GUEST_SYS:
	case PERF_HPP__OVERHEAD_GUEST_US:
		fmt->len = 9;
		break;

	case PERF_HPP__SAMPLES:
	case PERF_HPP__PERIOD:
		fmt->len = 12;
		break;

	default:
		break;
	}
}

void hists__reset_column_width(struct hists *hists)
{
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *node;

	hists__for_each_format(hists, fmt)
		perf_hpp__reset_width(fmt, hists);

	/* hierarchy entries have their own hpp list */
	list_for_each_entry(node, &hists->hpp_formats, list) {
		perf_hpp_list__for_each_format(&node->hpp, fmt)
			perf_hpp__reset_width(fmt, hists);
	}
}

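/*
 * Parse a comma-separated list of column widths, e.g. "8,10,12",
 * assigning each value to the output formats in order.  Parsing stops
 * after the first value that is not followed by a comma, so a short
 * list only overrides the leading columns.
 */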
void perf_hpp__set_user_width(const char *width_list_str)
{
	struct perf_hpp_fmt *fmt;
	const char *ptr = width_list_str;

	perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
		char *p;

		int len = strtol(ptr, &p, 10);
		fmt->user_len = len;

		if (*p == ',')
			ptr = p + 1;
		else
			break;
	}
}

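/*
 * Attach a copy of @fmt to the per-level hpp list used in hierarchy
 * mode, creating the level node on first use.  A level node stays
 * marked as skipped only while every format attached to it should be
 * skipped.
 */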
static int add_hierarchy_fmt(struct hists *hists, struct perf_hpp_fmt *fmt)
{
	struct perf_hpp_list_node *node = NULL;
	struct perf_hpp_fmt *fmt_copy;
	bool found = false;
	bool skip = perf_hpp__should_skip(fmt, hists);

	list_for_each_entry(node, &hists->hpp_formats, list) {
		if (node->level == fmt->level) {
			found = true;
			break;
		}
	}

	if (!found) {
		node = malloc(sizeof(*node));
		if (node == NULL)
			return -1;

		node->skip = skip;
		node->level = fmt->level;
		perf_hpp_list__init(&node->hpp);

		hists->nr_hpp_node++;
		list_add_tail(&node->list, &hists->hpp_formats);
	}

	fmt_copy = perf_hpp_fmt__dup(fmt);
	if (fmt_copy == NULL)
		return -1;

	if (!skip)
		node->skip = false;

	list_add_tail(&fmt_copy->list, &node->hpp.fields);
	list_add_tail(&fmt_copy->sort_list, &node->hpp.sorts);

	return 0;
}

int perf_hpp__setup_hists_formats(struct perf_hpp_list *list,
				  struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	struct perf_hpp_fmt *fmt;
	struct hists *hists;
	int ret;

	if (!symbol_conf.report_hierarchy)
		return 0;

	evlist__for_each_entry(evlist, evsel) {
		hists = evsel__hists(evsel);

		perf_hpp_list__for_each_sort_list(list, fmt) {
			if (perf_hpp__is_dynamic_entry(fmt) &&
			    !perf_hpp__defined_dynamic_entry(fmt, hists))
				continue;

			ret = add_hierarchy_fmt(hists, fmt);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}