hist.c 20 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <stdio.h>
  3. #include <linux/string.h>
  4. #include "../../util/util.h"
  5. #include "../../util/hist.h"
  6. #include "../../util/sort.h"
  7. #include "../../util/evsel.h"
  8. #include "../../util/srcline.h"
  9. #include "../../util/string2.h"
  10. #include "../../util/thread.h"
  11. #include "../../util/sane_ctype.h"
  12. static size_t callchain__fprintf_left_margin(FILE *fp, int left_margin)
  13. {
  14. int i;
  15. int ret = fprintf(fp, " ");
  16. for (i = 0; i < left_margin; i++)
  17. ret += fprintf(fp, " ");
  18. return ret;
  19. }
  20. static size_t ipchain__fprintf_graph_line(FILE *fp, int depth, int depth_mask,
  21. int left_margin)
  22. {
  23. int i;
  24. size_t ret = callchain__fprintf_left_margin(fp, left_margin);
  25. for (i = 0; i < depth; i++)
  26. if (depth_mask & (1 << i))
  27. ret += fprintf(fp, "| ");
  28. else
  29. ret += fprintf(fp, " ");
  30. ret += fprintf(fp, "\n");
  31. return ret;
  32. }
  33. static size_t ipchain__fprintf_graph(FILE *fp, struct callchain_node *node,
  34. struct callchain_list *chain,
  35. int depth, int depth_mask, int period,
  36. u64 total_samples, int left_margin)
  37. {
  38. int i;
  39. size_t ret = 0;
  40. char bf[1024], *alloc_str = NULL;
  41. char buf[64];
  42. const char *str;
  43. ret += callchain__fprintf_left_margin(fp, left_margin);
  44. for (i = 0; i < depth; i++) {
  45. if (depth_mask & (1 << i))
  46. ret += fprintf(fp, "|");
  47. else
  48. ret += fprintf(fp, " ");
  49. if (!period && i == depth - 1) {
  50. ret += fprintf(fp, "--");
  51. ret += callchain_node__fprintf_value(node, fp, total_samples);
  52. ret += fprintf(fp, "--");
  53. } else
  54. ret += fprintf(fp, "%s", " ");
  55. }
  56. str = callchain_list__sym_name(chain, bf, sizeof(bf), false);
  57. if (symbol_conf.show_branchflag_count) {
  58. callchain_list_counts__printf_value(chain, NULL,
  59. buf, sizeof(buf));
  60. if (asprintf(&alloc_str, "%s%s", str, buf) < 0)
  61. str = "Not enough memory!";
  62. else
  63. str = alloc_str;
  64. }
  65. fputs(str, fp);
  66. fputc('\n', fp);
  67. free(alloc_str);
  68. return ret;
  69. }
  70. static struct symbol *rem_sq_bracket;
  71. static struct callchain_list rem_hits;
  72. static void init_rem_hits(void)
  73. {
  74. rem_sq_bracket = malloc(sizeof(*rem_sq_bracket) + 6);
  75. if (!rem_sq_bracket) {
  76. fprintf(stderr, "Not enough memory to display remaining hits\n");
  77. return;
  78. }
  79. strcpy(rem_sq_bracket->name, "[...]");
  80. rem_hits.ms.sym = rem_sq_bracket;
  81. }
/*
 * Recursively print one level of the callchain graph rooted at @root.
 * @depth_mask has one bit per ancestor level; a set bit means that
 * level's "|" connector column is still open.  Returns the number of
 * characters written.
 */
static size_t __callchain__fprintf_graph(FILE *fp, struct rb_root *root,
					 u64 total_samples, int depth,
					 int depth_mask, int left_margin)
{
	struct rb_node *node, *next;
	struct callchain_node *child = NULL;
	struct callchain_list *chain;
	int new_depth_mask = depth_mask;
	u64 remaining;
	size_t ret = 0;
	int i;
	uint entries_printed = 0;
	int cumul_count = 0;

	remaining = total_samples;

	node = rb_first(root);
	while (node) {
		u64 new_total;
		u64 cumul;

		child = rb_entry(node, struct callchain_node, rb_node);
		cumul = callchain_cumul_hits(child);
		remaining -= cumul;
		cumul_count += callchain_cumul_counts(child);

		/*
		 * The depth mask manages the output of pipes that show
		 * the depth. We don't want to keep the pipes of the current
		 * level for the last child of this depth.
		 * Except if we have remaining filtered hits. They will
		 * supersede the last child
		 */
		next = rb_next(node);
		if (!next && (callchain_param.mode != CHAIN_GRAPH_REL || !remaining))
			new_depth_mask &= ~(1 << (depth - 1));

		/*
		 * But we keep the older depth mask for the line separator
		 * to keep the level link until we reach the last child
		 */
		ret += ipchain__fprintf_graph_line(fp, depth, depth_mask,
						   left_margin);
		i = 0;
		list_for_each_entry(chain, &child->val, list) {
			/* i == 0 flags the first entry of this child (period == 0). */
			ret += ipchain__fprintf_graph(fp, child, chain, depth,
						      new_depth_mask, i++,
						      total_samples,
						      left_margin);
		}

		/* In relative mode, percentages are against this subtree's hits. */
		if (callchain_param.mode == CHAIN_GRAPH_REL)
			new_total = child->children_hit;
		else
			new_total = total_samples;

		ret += __callchain__fprintf_graph(fp, &child->rb_root, new_total,
						  depth + 1,
						  new_depth_mask | (1 << depth),
						  left_margin);
		node = next;
		if (++entries_printed == callchain_param.print_limit)
			break;
	}

	/* Emit a synthetic "[...]" entry for hits filtered out below us. */
	if (callchain_param.mode == CHAIN_GRAPH_REL &&
		remaining && remaining != total_samples) {
		struct callchain_node rem_node = {
			.hit = remaining,
		};

		/* init_rem_hits() may have failed to allocate the symbol. */
		if (!rem_sq_bracket)
			return ret;

		if (callchain_param.value == CCVAL_COUNT && child && child->parent) {
			rem_node.count = child->parent->children_count - cumul_count;
			if (rem_node.count <= 0)
				return ret;
		}

		new_depth_mask &= ~(1 << (depth - 1));
		ret += ipchain__fprintf_graph(fp, &rem_node, &rem_hits, depth,
					      new_depth_mask, 0, total_samples,
					      left_margin);
	}

	return ret;
}
  158. /*
  159. * If have one single callchain root, don't bother printing
  160. * its percentage (100 % in fractal mode and the same percentage
  161. * than the hist in graph mode). This also avoid one level of column.
  162. *
  163. * However when percent-limit applied, it's possible that single callchain
  164. * node have different (non-100% in fractal mode) percentage.
  165. */
  166. static bool need_percent_display(struct rb_node *node, u64 parent_samples)
  167. {
  168. struct callchain_node *cnode;
  169. if (rb_next(node))
  170. return true;
  171. cnode = rb_entry(node, struct callchain_node, rb_node);
  172. return callchain_cumul_hits(cnode) != parent_samples;
  173. }
/*
 * Print a hist entry's callchain in graph mode.  When the chain has a
 * single root that covers all parent samples, that root's entries are
 * printed inline (no percentage column) and the graph starts below
 * them.  Returns the number of characters written.
 */
static size_t callchain__fprintf_graph(FILE *fp, struct rb_root *root,
				       u64 total_samples, u64 parent_samples,
				       int left_margin)
{
	struct callchain_node *cnode;
	struct callchain_list *chain;
	u32 entries_printed = 0;
	bool printed = false;
	struct rb_node *node;
	int i = 0;
	int ret = 0;
	char bf[1024];

	node = rb_first(root);
	if (node && !need_percent_display(node, parent_samples)) {
		cnode = rb_entry(node, struct callchain_node, rb_node);
		list_for_each_entry(chain, &cnode->val, list) {
			/*
			 * If we sort by symbol, the first entry is the same than
			 * the symbol. No need to print it otherwise it appears as
			 * displayed twice.
			 */
			if (!i++ && field_order == NULL &&
			    sort_order && strstarts(sort_order, "sym"))
				continue;

			if (!printed) {
				/* Open the graph with "|" then a "---" stem. */
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "|\n");
				ret += callchain__fprintf_left_margin(fp, left_margin);
				ret += fprintf(fp, "---");
				/* Subsequent lines align under the stem. */
				left_margin += 3;
				printed = true;
			} else
				ret += callchain__fprintf_left_margin(fp, left_margin);

			ret += fprintf(fp, "%s",
				       callchain_list__sym_name(chain, bf,
								sizeof(bf),
								false));

			if (symbol_conf.show_branchflag_count)
				ret += callchain_list_counts__printf_value(
						chain, fp, NULL, 0);
			ret += fprintf(fp, "\n");

			if (++entries_printed == callchain_param.print_limit)
				break;
		}
		/* Descend past the inlined root. */
		root = &cnode->rb_root;
	}

	/* In relative mode, percentages are against the parent's samples. */
	if (callchain_param.mode == CHAIN_GRAPH_REL)
		total_samples = parent_samples;

	ret += __callchain__fprintf_graph(fp, root, total_samples,
					  1, 1, left_margin);
	if (ret) {
		/* do not add a blank line if it printed nothing */
		ret += fprintf(fp, "\n");
	}

	return ret;
}
  230. static size_t __callchain__fprintf_flat(FILE *fp, struct callchain_node *node,
  231. u64 total_samples)
  232. {
  233. struct callchain_list *chain;
  234. size_t ret = 0;
  235. char bf[1024];
  236. if (!node)
  237. return 0;
  238. ret += __callchain__fprintf_flat(fp, node->parent, total_samples);
  239. list_for_each_entry(chain, &node->val, list) {
  240. if (chain->ip >= PERF_CONTEXT_MAX)
  241. continue;
  242. ret += fprintf(fp, " %s\n", callchain_list__sym_name(chain,
  243. bf, sizeof(bf), false));
  244. }
  245. return ret;
  246. }
  247. static size_t callchain__fprintf_flat(FILE *fp, struct rb_root *tree,
  248. u64 total_samples)
  249. {
  250. size_t ret = 0;
  251. u32 entries_printed = 0;
  252. struct callchain_node *chain;
  253. struct rb_node *rb_node = rb_first(tree);
  254. while (rb_node) {
  255. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  256. ret += fprintf(fp, " ");
  257. ret += callchain_node__fprintf_value(chain, fp, total_samples);
  258. ret += fprintf(fp, "\n");
  259. ret += __callchain__fprintf_flat(fp, chain, total_samples);
  260. ret += fprintf(fp, "\n");
  261. if (++entries_printed == callchain_param.print_limit)
  262. break;
  263. rb_node = rb_next(rb_node);
  264. }
  265. return ret;
  266. }
  267. static size_t __callchain__fprintf_folded(FILE *fp, struct callchain_node *node)
  268. {
  269. const char *sep = symbol_conf.field_sep ?: ";";
  270. struct callchain_list *chain;
  271. size_t ret = 0;
  272. char bf[1024];
  273. bool first;
  274. if (!node)
  275. return 0;
  276. ret += __callchain__fprintf_folded(fp, node->parent);
  277. first = (ret == 0);
  278. list_for_each_entry(chain, &node->val, list) {
  279. if (chain->ip >= PERF_CONTEXT_MAX)
  280. continue;
  281. ret += fprintf(fp, "%s%s", first ? "" : sep,
  282. callchain_list__sym_name(chain,
  283. bf, sizeof(bf), false));
  284. first = false;
  285. }
  286. return ret;
  287. }
  288. static size_t callchain__fprintf_folded(FILE *fp, struct rb_root *tree,
  289. u64 total_samples)
  290. {
  291. size_t ret = 0;
  292. u32 entries_printed = 0;
  293. struct callchain_node *chain;
  294. struct rb_node *rb_node = rb_first(tree);
  295. while (rb_node) {
  296. chain = rb_entry(rb_node, struct callchain_node, rb_node);
  297. ret += callchain_node__fprintf_value(chain, fp, total_samples);
  298. ret += fprintf(fp, " ");
  299. ret += __callchain__fprintf_folded(fp, chain);
  300. ret += fprintf(fp, "\n");
  301. if (++entries_printed == callchain_param.print_limit)
  302. break;
  303. rb_node = rb_next(rb_node);
  304. }
  305. return ret;
  306. }
  307. static size_t hist_entry_callchain__fprintf(struct hist_entry *he,
  308. u64 total_samples, int left_margin,
  309. FILE *fp)
  310. {
  311. u64 parent_samples = he->stat.period;
  312. if (symbol_conf.cumulate_callchain)
  313. parent_samples = he->stat_acc->period;
  314. switch (callchain_param.mode) {
  315. case CHAIN_GRAPH_REL:
  316. return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
  317. parent_samples, left_margin);
  318. break;
  319. case CHAIN_GRAPH_ABS:
  320. return callchain__fprintf_graph(fp, &he->sorted_chain, total_samples,
  321. parent_samples, left_margin);
  322. break;
  323. case CHAIN_FLAT:
  324. return callchain__fprintf_flat(fp, &he->sorted_chain, total_samples);
  325. break;
  326. case CHAIN_FOLDED:
  327. return callchain__fprintf_folded(fp, &he->sorted_chain, total_samples);
  328. break;
  329. case CHAIN_NONE:
  330. break;
  331. default:
  332. pr_err("Bad callchain mode\n");
  333. }
  334. return 0;
  335. }
/*
 * Format all configured columns of @he into @hpp->buf using the format
 * list @hpp_list.  Returns the number of characters written into the
 * buffer; returns 0 (formats nothing) for entries without a parent
 * when --exclude-other is in effect.
 */
int __hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp,
			   struct perf_hpp_list *hpp_list)
{
	const char *sep = symbol_conf.field_sep;
	struct perf_hpp_fmt *fmt;
	char *start = hpp->buf;
	int ret;
	bool first = true;

	if (symbol_conf.exclude_other && !he->parent)
		return 0;

	perf_hpp_list__for_each_format(hpp_list, fmt) {
		if (perf_hpp__should_skip(fmt, he->hists))
			continue;

		/*
		 * If there's no field_sep, we still need
		 * to display initial ' '.
		 */
		if (!sep || !first) {
			ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
			advance_hpp(hpp, ret);
		} else
			first = false;

		if (perf_hpp__use_color() && fmt->color)
			ret = fmt->color(fmt, hpp, he);
		else
			ret = fmt->entry(fmt, hpp, he);

		/* Pad/align the column, then advance the buffer past it. */
		ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
		advance_hpp(hpp, ret);
	}

	/* Total bytes consumed from the caller's buffer. */
	return hpp->buf - start;
}
  367. static int hist_entry__snprintf(struct hist_entry *he, struct perf_hpp *hpp)
  368. {
  369. return __hist_entry__snprintf(he, hpp, he->hists->hpp_list);
  370. }
  371. static int hist_entry__hierarchy_fprintf(struct hist_entry *he,
  372. struct perf_hpp *hpp,
  373. struct hists *hists,
  374. FILE *fp)
  375. {
  376. const char *sep = symbol_conf.field_sep;
  377. struct perf_hpp_fmt *fmt;
  378. struct perf_hpp_list_node *fmt_node;
  379. char *buf = hpp->buf;
  380. size_t size = hpp->size;
  381. int ret, printed = 0;
  382. bool first = true;
  383. if (symbol_conf.exclude_other && !he->parent)
  384. return 0;
  385. ret = scnprintf(hpp->buf, hpp->size, "%*s", he->depth * HIERARCHY_INDENT, "");
  386. advance_hpp(hpp, ret);
  387. /* the first hpp_list_node is for overhead columns */
  388. fmt_node = list_first_entry(&hists->hpp_formats,
  389. struct perf_hpp_list_node, list);
  390. perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
  391. /*
  392. * If there's no field_sep, we still need
  393. * to display initial ' '.
  394. */
  395. if (!sep || !first) {
  396. ret = scnprintf(hpp->buf, hpp->size, "%s", sep ?: " ");
  397. advance_hpp(hpp, ret);
  398. } else
  399. first = false;
  400. if (perf_hpp__use_color() && fmt->color)
  401. ret = fmt->color(fmt, hpp, he);
  402. else
  403. ret = fmt->entry(fmt, hpp, he);
  404. ret = hist_entry__snprintf_alignment(he, hpp, fmt, ret);
  405. advance_hpp(hpp, ret);
  406. }
  407. if (!sep)
  408. ret = scnprintf(hpp->buf, hpp->size, "%*s",
  409. (hists->nr_hpp_node - 2) * HIERARCHY_INDENT, "");
  410. advance_hpp(hpp, ret);
  411. printed += fprintf(fp, "%s", buf);
  412. perf_hpp_list__for_each_format(he->hpp_list, fmt) {
  413. hpp->buf = buf;
  414. hpp->size = size;
  415. /*
  416. * No need to call hist_entry__snprintf_alignment() since this
  417. * fmt is always the last column in the hierarchy mode.
  418. */
  419. if (perf_hpp__use_color() && fmt->color)
  420. fmt->color(fmt, hpp, he);
  421. else
  422. fmt->entry(fmt, hpp, he);
  423. /*
  424. * dynamic entries are right-aligned but we want left-aligned
  425. * in the hierarchy mode
  426. */
  427. printed += fprintf(fp, "%s%s", sep ?: " ", ltrim(buf));
  428. }
  429. printed += putc('\n', fp);
  430. if (he->leaf && hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
  431. u64 total = hists__total_period(hists);
  432. printed += hist_entry_callchain__fprintf(he, total, 0, fp);
  433. goto out;
  434. }
  435. out:
  436. return printed;
  437. }
  438. static int hist_entry__fprintf(struct hist_entry *he, size_t size,
  439. char *bf, size_t bfsz, FILE *fp,
  440. bool ignore_callchains)
  441. {
  442. int ret;
  443. int callchain_ret = 0;
  444. struct perf_hpp hpp = {
  445. .buf = bf,
  446. .size = size,
  447. };
  448. struct hists *hists = he->hists;
  449. u64 total_period = hists->stats.total_period;
  450. if (size == 0 || size > bfsz)
  451. size = hpp.size = bfsz;
  452. if (symbol_conf.report_hierarchy)
  453. return hist_entry__hierarchy_fprintf(he, &hpp, hists, fp);
  454. hist_entry__snprintf(he, &hpp);
  455. ret = fprintf(fp, "%s\n", bf);
  456. if (hist_entry__has_callchains(he) && !ignore_callchains)
  457. callchain_ret = hist_entry_callchain__fprintf(he, total_period,
  458. 0, fp);
  459. ret += callchain_ret;
  460. return ret;
  461. }
  462. static int print_hierarchy_indent(const char *sep, int indent,
  463. const char *line, FILE *fp)
  464. {
  465. if (sep != NULL || indent < 2)
  466. return 0;
  467. return fprintf(fp, "%-.*s", (indent - 2) * HIERARCHY_INDENT, line);
  468. }
/*
 * Print the header block for hierarchy mode: a title line (overhead
 * columns, then the sort keys joined with " / ") and a dotted
 * underline sized to the widest combined header.  Always returns 2,
 * the number of header lines printed.
 */
static int hists__fprintf_hierarchy_headers(struct hists *hists,
					    struct perf_hpp *hpp, FILE *fp)
{
	bool first_node, first_col;
	int indent;
	int depth;
	unsigned width = 0;
	unsigned header_width = 0;
	struct perf_hpp_fmt *fmt;
	struct perf_hpp_list_node *fmt_node;
	const char *sep = symbol_conf.field_sep;

	indent = hists->nr_hpp_node;

	/* preserve max indent depth for column headers */
	print_hierarchy_indent(sep, indent, spaces, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		fmt->header(fmt, hpp, hists, 0, NULL);
		fprintf(fp, "%s%s", hpp->buf, sep ?: " ");
	}

	/* combine sort headers with ' / ' */
	first_node = true;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		if (!first_node)
			header_width += fprintf(fp, " / ");
		first_node = false;

		first_col = true;
		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			/* '+' joins columns belonging to the same sort key. */
			if (!first_col)
				header_width += fprintf(fp, "+");
			first_col = false;

			fmt->header(fmt, hpp, hists, 0, NULL);

			header_width += fprintf(fp, "%s", trim(hpp->buf));
		}
	}

	fprintf(fp, "\n# ");

	/* preserve max indent depth for initial dots */
	print_hierarchy_indent(sep, indent, dots, fp);

	/* the first hpp_list_node is for overhead columns */
	fmt_node = list_first_entry(&hists->hpp_formats,
				    struct perf_hpp_list_node, list);

	/* Underline each overhead column with dots of its width. */
	first_col = true;
	perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
		if (!first_col)
			fprintf(fp, "%s", sep ?: "..");
		first_col = false;

		width = fmt->width(fmt, hpp, hists);
		fprintf(fp, "%.*s", width, dots);
	}

	/*
	 * Find the widest sort-key line (indent + columns + '+' signs) so
	 * the final dotted run spans the whole combined header.
	 */
	depth = 0;
	list_for_each_entry_continue(fmt_node, &hists->hpp_formats, list) {
		first_col = true;
		width = depth * HIERARCHY_INDENT;

		perf_hpp_list__for_each_format(&fmt_node->hpp, fmt) {
			if (perf_hpp__should_skip(fmt, hists))
				continue;

			if (!first_col)
				width++; /* for '+' sign between column header */
			first_col = false;

			width += fmt->width(fmt, hpp, hists);
		}

		if (width > header_width)
			header_width = width;

		depth++;
	}

	fprintf(fp, "%s%-.*s", sep ?: " ", header_width, dots);

	fprintf(fp, "\n#\n");

	return 2;
}
  541. static void fprintf_line(struct hists *hists, struct perf_hpp *hpp,
  542. int line, FILE *fp)
  543. {
  544. struct perf_hpp_fmt *fmt;
  545. const char *sep = symbol_conf.field_sep;
  546. bool first = true;
  547. int span = 0;
  548. hists__for_each_format(hists, fmt) {
  549. if (perf_hpp__should_skip(fmt, hists))
  550. continue;
  551. if (!first && !span)
  552. fprintf(fp, "%s", sep ?: " ");
  553. else
  554. first = false;
  555. fmt->header(fmt, hpp, hists, line, &span);
  556. if (!span)
  557. fprintf(fp, "%s", hpp->buf);
  558. }
  559. }
/*
 * Print the standard (non-hierarchy) headers: every configured header
 * line, then a dotted underline per column.  Returns the number of
 * lines printed.
 */
static int
hists__fprintf_standard_headers(struct hists *hists,
				struct perf_hpp *hpp,
				FILE *fp)
{
	struct perf_hpp_list *hpp_list = hists->hpp_list;
	struct perf_hpp_fmt *fmt;
	unsigned int width;
	const char *sep = symbol_conf.field_sep;
	bool first = true;
	int line;

	for (line = 0; line < hpp_list->nr_header_lines; line++) {
		/* first # is displayed one level up */
		if (line)
			fprintf(fp, "# ");
		fprintf_line(hists, hpp, line, fp);
		fprintf(fp, "\n");
	}

	/* No dotted underline when a field separator (parsable output) is set. */
	if (sep)
		return hpp_list->nr_header_lines;

	first = true;

	fprintf(fp, "# ");

	/* Underline each column with dots matching its width. */
	hists__for_each_format(hists, fmt) {
		unsigned int i;

		if (perf_hpp__should_skip(fmt, hists))
			continue;

		if (!first)
			fprintf(fp, "%s", sep ?: " ");
		else
			first = false;

		width = fmt->width(fmt, hpp, hists);
		for (i = 0; i < width; i++)
			fprintf(fp, ".");
	}

	fprintf(fp, "\n");
	fprintf(fp, "#\n");

	/* header lines plus the dotted line and the trailing "#" line */
	return hpp_list->nr_header_lines + 2;
}
  598. int hists__fprintf_headers(struct hists *hists, FILE *fp)
  599. {
  600. char bf[1024];
  601. struct perf_hpp dummy_hpp = {
  602. .buf = bf,
  603. .size = sizeof(bf),
  604. };
  605. fprintf(fp, "# ");
  606. if (symbol_conf.report_hierarchy)
  607. return hists__fprintf_hierarchy_headers(hists, &dummy_hpp, fp);
  608. else
  609. return hists__fprintf_standard_headers(hists, &dummy_hpp, fp);
  610. }
/*
 * Print all unfiltered entries of @hists above @min_pcnt to @fp,
 * optionally preceded by headers.  @max_rows and @max_cols bound the
 * output (0 means unlimited).  Returns the number of characters
 * printed; note that on allocation failure -1 is returned through a
 * size_t, i.e. (size_t)-1.
 */
size_t hists__fprintf(struct hists *hists, bool show_header, int max_rows,
		      int max_cols, float min_pcnt, FILE *fp,
		      bool ignore_callchains)
{
	struct rb_node *nd;
	size_t ret = 0;
	const char *sep = symbol_conf.field_sep;
	int nr_rows = 0;
	size_t linesz;
	char *line = NULL;
	unsigned indent;

	init_rem_hits();

	hists__reset_column_width(hists);

	if (symbol_conf.col_width_list_str)
		perf_hpp__set_user_width(symbol_conf.col_width_list_str);

	if (show_header)
		nr_rows += hists__fprintf_headers(hists, fp);

	if (max_rows && nr_rows >= max_rows)
		goto out;

	/* Scratch line: sort columns plus slack and color escape overhead. */
	linesz = hists__sort_list_width(hists) + 3 + 1;
	linesz += perf_hpp__color_overhead();
	line = malloc(linesz);
	if (line == NULL) {
		ret = -1;
		goto out;
	}

	indent = hists__overhead_width(hists) + 4;

	for (nd = rb_first(&hists->entries); nd; nd = __rb_hierarchy_next(nd, HMD_FORCE_CHILD)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		float percent;

		if (h->filtered)
			continue;

		percent = hist_entry__get_percent_limit(h);
		if (percent < min_pcnt)
			continue;

		ret += hist_entry__fprintf(h, max_cols, line, linesz, fp, ignore_callchains);

		if (max_rows && ++nr_rows >= max_rows)
			break;

		/*
		 * If all children are filtered out or percent-limited,
		 * display "no entry >= x.xx%" message.
		 */
		if (!h->leaf && !hist_entry__has_hierarchy_children(h, min_pcnt)) {
			int depth = hists->nr_hpp_node + h->depth + 1;

			print_hierarchy_indent(sep, depth, spaces, fp);
			fprintf(fp, "%*sno entry >= %.2f%%\n", indent, "", min_pcnt);

			if (max_rows && ++nr_rows >= max_rows)
				break;
		}

		/* In very verbose mode, dump the thread's maps for debugging. */
		if (h->ms.map == NULL && verbose > 1) {
			map_groups__fprintf(h->thread->mg, fp);
			fprintf(fp, "%.10s end\n", graph_dotted_line);
		}
	}

	free(line);
out:
	/* Drop the "[...]" placeholder allocated by init_rem_hits(). */
	zfree(&rem_sq_bracket);

	return ret;
}
  670. size_t events_stats__fprintf(struct events_stats *stats, FILE *fp)
  671. {
  672. int i;
  673. size_t ret = 0;
  674. for (i = 0; i < PERF_RECORD_HEADER_MAX; ++i) {
  675. const char *name;
  676. name = perf_event__name(i);
  677. if (!strcmp(name, "UNKNOWN"))
  678. continue;
  679. ret += fprintf(fp, "%16s events: %10d\n", name, stats->nr_events[i]);
  680. }
  681. return ret;
  682. }