callchain.c

  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2009-2011, Frederic Weisbecker <fweisbec@gmail.com>
  4. *
  5. * Handle the callchains from the stream in an ad-hoc radix tree and then
  6. * sort them in an rbtree.
  7. *
  8. * Using a radix tree for the code paths provides fast retrieval and factorizes
  9. * memory use. It also lets us use the paths in a hierarchical graph view.
  10. *
  11. */
  12. #include <inttypes.h>
  13. #include <stdlib.h>
  14. #include <stdio.h>
  15. #include <stdbool.h>
  16. #include <errno.h>
  17. #include <math.h>
  18. #include "asm/bug.h"
  19. #include "hist.h"
  20. #include "util.h"
  21. #include "sort.h"
  22. #include "machine.h"
  23. #include "callchain.h"
  24. #include "branch.h"
  25. #define CALLCHAIN_PARAM_DEFAULT \
  26. .mode = CHAIN_GRAPH_ABS, \
  27. .min_percent = 0.5, \
  28. .order = ORDER_CALLEE, \
  29. .key = CCKEY_FUNCTION, \
  30. .value = CCVAL_PERCENT, \
  31. struct callchain_param callchain_param = {
  32. CALLCHAIN_PARAM_DEFAULT
  33. };
  34. /*
  36. * Are there any events using DWARF callchains?
  36. *
  37. * I.e.
  38. *
  39. * -e cycles/call-graph=dwarf/
  40. */
  41. bool dwarf_callchain_users;
  42. struct callchain_param callchain_param_default = {
  43. CALLCHAIN_PARAM_DEFAULT
  44. };
  45. __thread struct callchain_cursor callchain_cursor;
  46. int parse_callchain_record_opt(const char *arg, struct callchain_param *param)
  47. {
  48. return parse_callchain_record(arg, param);
  49. }
  50. static int parse_callchain_mode(const char *value)
  51. {
  52. if (!strncmp(value, "graph", strlen(value))) {
  53. callchain_param.mode = CHAIN_GRAPH_ABS;
  54. return 0;
  55. }
  56. if (!strncmp(value, "flat", strlen(value))) {
  57. callchain_param.mode = CHAIN_FLAT;
  58. return 0;
  59. }
  60. if (!strncmp(value, "fractal", strlen(value))) {
  61. callchain_param.mode = CHAIN_GRAPH_REL;
  62. return 0;
  63. }
  64. if (!strncmp(value, "folded", strlen(value))) {
  65. callchain_param.mode = CHAIN_FOLDED;
  66. return 0;
  67. }
  68. return -1;
  69. }
  70. static int parse_callchain_order(const char *value)
  71. {
  72. if (!strncmp(value, "caller", strlen(value))) {
  73. callchain_param.order = ORDER_CALLER;
  74. callchain_param.order_set = true;
  75. return 0;
  76. }
  77. if (!strncmp(value, "callee", strlen(value))) {
  78. callchain_param.order = ORDER_CALLEE;
  79. callchain_param.order_set = true;
  80. return 0;
  81. }
  82. return -1;
  83. }
  84. static int parse_callchain_sort_key(const char *value)
  85. {
  86. if (!strncmp(value, "function", strlen(value))) {
  87. callchain_param.key = CCKEY_FUNCTION;
  88. return 0;
  89. }
  90. if (!strncmp(value, "address", strlen(value))) {
  91. callchain_param.key = CCKEY_ADDRESS;
  92. return 0;
  93. }
  94. if (!strncmp(value, "srcline", strlen(value))) {
  95. callchain_param.key = CCKEY_SRCLINE;
  96. return 0;
  97. }
  98. if (!strncmp(value, "branch", strlen(value))) {
  99. callchain_param.branch_callstack = 1;
  100. return 0;
  101. }
  102. return -1;
  103. }
  104. static int parse_callchain_value(const char *value)
  105. {
  106. if (!strncmp(value, "percent", strlen(value))) {
  107. callchain_param.value = CCVAL_PERCENT;
  108. return 0;
  109. }
  110. if (!strncmp(value, "period", strlen(value))) {
  111. callchain_param.value = CCVAL_PERIOD;
  112. return 0;
  113. }
  114. if (!strncmp(value, "count", strlen(value))) {
  115. callchain_param.value = CCVAL_COUNT;
  116. return 0;
  117. }
  118. return -1;
  119. }
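/*
 * Parse a stack dump size string: round it up to a multiple of u64 and
 * reject zero, trailing garbage, or anything above USHRT_MAX rounded
 * down to a u64 boundary.
 */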
  120. static int get_stack_size(const char *str, unsigned long *_size)
  121. {
  122. char *endptr;
  123. unsigned long size;
  124. unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
  125. size = strtoul(str, &endptr, 0);
  126. do {
  127. if (*endptr)
  128. break;
  129. size = round_up(size, sizeof(u64));
  130. if (!size || size > max_size)
  131. break;
  132. *_size = size;
  133. return 0;
  134. } while (0);
  135. pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
  136. max_size, str);
  137. return -1;
  138. }
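/*
 * Parse the comma-separated report options. Each token is first tried as
 * a mode, order, sort key or value style; when record options are allowed
 * it is then tried as a record mode, a bare number after "dwarf" is taken
 * as the stack dump size, and remaining numbers set the minimum percent
 * and, lastly, the print limit.
 */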
  139. static int
  140. __parse_callchain_report_opt(const char *arg, bool allow_record_opt)
  141. {
  142. char *tok;
  143. char *endptr, *saveptr = NULL;
  144. bool minpcnt_set = false;
  145. bool record_opt_set = false;
  146. bool try_stack_size = false;
  147. callchain_param.enabled = true;
  148. symbol_conf.use_callchain = true;
  149. if (!arg)
  150. return 0;
  151. while ((tok = strtok_r((char *)arg, ",", &saveptr)) != NULL) {
  152. if (!strncmp(tok, "none", strlen(tok))) {
  153. callchain_param.mode = CHAIN_NONE;
  154. callchain_param.enabled = false;
  155. symbol_conf.use_callchain = false;
  156. return 0;
  157. }
  158. if (!parse_callchain_mode(tok) ||
  159. !parse_callchain_order(tok) ||
  160. !parse_callchain_sort_key(tok) ||
  161. !parse_callchain_value(tok)) {
  162. /* parsing ok - move on to the next */
  163. try_stack_size = false;
  164. goto next;
  165. } else if (allow_record_opt && !record_opt_set) {
  166. if (parse_callchain_record(tok, &callchain_param))
  167. goto try_numbers;
  168. /* assume that a number following 'dwarf' is the stack size */
  169. if (callchain_param.record_mode == CALLCHAIN_DWARF)
  170. try_stack_size = true;
  171. record_opt_set = true;
  172. goto next;
  173. }
  174. try_numbers:
  175. if (try_stack_size) {
  176. unsigned long size = 0;
  177. if (get_stack_size(tok, &size) < 0)
  178. return -1;
  179. callchain_param.dump_size = size;
  180. try_stack_size = false;
  181. } else if (!minpcnt_set) {
  182. /* try to get the min percent */
  183. callchain_param.min_percent = strtod(tok, &endptr);
  184. if (tok == endptr)
  185. return -1;
  186. minpcnt_set = true;
  187. } else {
  188. /* lastly, try the print limit */
  189. callchain_param.print_limit = strtoul(tok, &endptr, 0);
  190. if (tok == endptr)
  191. return -1;
  192. }
  193. next:
  194. arg = NULL;
  195. }
  196. if (callchain_register_param(&callchain_param) < 0) {
  197. pr_err("Can't register callchain params\n");
  198. return -1;
  199. }
  200. return 0;
  201. }
  202. int parse_callchain_report_opt(const char *arg)
  203. {
  204. return __parse_callchain_report_opt(arg, false);
  205. }
  206. int parse_callchain_top_opt(const char *arg)
  207. {
  208. return __parse_callchain_report_opt(arg, true);
  209. }
  210. int parse_callchain_record(const char *arg, struct callchain_param *param)
  211. {
  212. char *tok, *name, *saveptr = NULL;
  213. char *buf;
  214. int ret = -1;
  215. /* We need a buffer that we know we can write to. */
  216. buf = malloc(strlen(arg) + 1);
  217. if (!buf)
  218. return -ENOMEM;
  219. strcpy(buf, arg);
  220. tok = strtok_r((char *)buf, ",", &saveptr);
  221. name = tok ? : (char *)buf;
  222. do {
  223. /* Framepointer style */
  224. if (!strncmp(name, "fp", sizeof("fp"))) {
  225. if (!strtok_r(NULL, ",", &saveptr)) {
  226. param->record_mode = CALLCHAIN_FP;
  227. ret = 0;
  228. } else
  229. pr_err("callchain: No more arguments "
  230. "needed for --call-graph fp\n");
  231. break;
  232. /* Dwarf style */
  233. } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
  234. const unsigned long default_stack_dump_size = 8192;
  235. ret = 0;
  236. param->record_mode = CALLCHAIN_DWARF;
  237. param->dump_size = default_stack_dump_size;
  238. dwarf_callchain_users = true;
  239. tok = strtok_r(NULL, ",", &saveptr);
  240. if (tok) {
  241. unsigned long size = 0;
  242. ret = get_stack_size(tok, &size);
  243. param->dump_size = size;
  244. }
  245. } else if (!strncmp(name, "lbr", sizeof("lbr"))) {
  246. if (!strtok_r(NULL, ",", &saveptr)) {
  247. param->record_mode = CALLCHAIN_LBR;
  248. ret = 0;
  249. } else
  250. pr_err("callchain: No more arguments "
  251. "needed for --call-graph lbr\n");
  252. break;
  253. } else {
  254. pr_err("callchain: Unknown --call-graph option "
  255. "value: %s\n", arg);
  256. break;
  257. }
  258. } while (0);
  259. free(buf);
  260. return ret;
  261. }
  262. int perf_callchain_config(const char *var, const char *value)
  263. {
  264. char *endptr;
  265. if (!strstarts(var, "call-graph."))
  266. return 0;
  267. var += sizeof("call-graph.") - 1;
  268. if (!strcmp(var, "record-mode"))
  269. return parse_callchain_record_opt(value, &callchain_param);
  270. if (!strcmp(var, "dump-size")) {
  271. unsigned long size = 0;
  272. int ret;
  273. ret = get_stack_size(value, &size);
  274. callchain_param.dump_size = size;
  275. return ret;
  276. }
  277. if (!strcmp(var, "print-type")){
  278. int ret;
  279. ret = parse_callchain_mode(value);
  280. if (ret == -1)
  281. pr_err("Invalid callchain mode: %s\n", value);
  282. return ret;
  283. }
  284. if (!strcmp(var, "order")){
  285. int ret;
  286. ret = parse_callchain_order(value);
  287. if (ret == -1)
  288. pr_err("Invalid callchain order: %s\n", value);
  289. return ret;
  290. }
  291. if (!strcmp(var, "sort-key")){
  292. int ret;
  293. ret = parse_callchain_sort_key(value);
  294. if (ret == -1)
  295. pr_err("Invalid callchain sort key: %s\n", value);
  296. return ret;
  297. }
  298. if (!strcmp(var, "threshold")) {
  299. callchain_param.min_percent = strtod(value, &endptr);
  300. if (value == endptr) {
  301. pr_err("Invalid callchain threshold: %s\n", value);
  302. return -1;
  303. }
  304. }
  305. if (!strcmp(var, "print-limit")) {
  306. callchain_param.print_limit = strtod(value, &endptr);
  307. if (value == endptr) {
  308. pr_err("Invalid callchain print limit: %s\n", value);
  309. return -1;
  310. }
  311. }
  312. return 0;
  313. }
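/*
 * Insert a chain into its parent's sorted rbtree, ordered by hit count
 * for flat/folded output and by cumulative hits for the graph modes, so
 * that rb_first() yields the hottest entry.
 */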
  314. static void
  315. rb_insert_callchain(struct rb_root *root, struct callchain_node *chain,
  316. enum chain_mode mode)
  317. {
  318. struct rb_node **p = &root->rb_node;
  319. struct rb_node *parent = NULL;
  320. struct callchain_node *rnode;
  321. u64 chain_cumul = callchain_cumul_hits(chain);
  322. while (*p) {
  323. u64 rnode_cumul;
  324. parent = *p;
  325. rnode = rb_entry(parent, struct callchain_node, rb_node);
  326. rnode_cumul = callchain_cumul_hits(rnode);
  327. switch (mode) {
  328. case CHAIN_FLAT:
  329. case CHAIN_FOLDED:
  330. if (rnode->hit < chain->hit)
  331. p = &(*p)->rb_left;
  332. else
  333. p = &(*p)->rb_right;
  334. break;
  335. case CHAIN_GRAPH_ABS: /* Fall through */
  336. case CHAIN_GRAPH_REL:
  337. if (rnode_cumul < chain_cumul)
  338. p = &(*p)->rb_left;
  339. else
  340. p = &(*p)->rb_right;
  341. break;
  342. case CHAIN_NONE:
  343. default:
  344. break;
  345. }
  346. }
  347. rb_link_node(&chain->rb_node, parent, p);
  348. rb_insert_color(&chain->rb_node, root);
  349. }
  350. static void
  351. __sort_chain_flat(struct rb_root *rb_root, struct callchain_node *node,
  352. u64 min_hit)
  353. {
  354. struct rb_node *n;
  355. struct callchain_node *child;
  356. n = rb_first(&node->rb_root_in);
  357. while (n) {
  358. child = rb_entry(n, struct callchain_node, rb_node_in);
  359. n = rb_next(n);
  360. __sort_chain_flat(rb_root, child, min_hit);
  361. }
  362. if (node->hit && node->hit >= min_hit)
  363. rb_insert_callchain(rb_root, node, CHAIN_FLAT);
  364. }
  365. /*
  366. * Once we have every callchain from the stream, we can
  367. * sort them by hit
  368. */
  369. static void
  370. sort_chain_flat(struct rb_root *rb_root, struct callchain_root *root,
  371. u64 min_hit, struct callchain_param *param __maybe_unused)
  372. {
  373. *rb_root = RB_ROOT;
  374. __sort_chain_flat(rb_root, &root->node, min_hit);
  375. }
  376. static void __sort_chain_graph_abs(struct callchain_node *node,
  377. u64 min_hit)
  378. {
  379. struct rb_node *n;
  380. struct callchain_node *child;
  381. node->rb_root = RB_ROOT;
  382. n = rb_first(&node->rb_root_in);
  383. while (n) {
  384. child = rb_entry(n, struct callchain_node, rb_node_in);
  385. n = rb_next(n);
  386. __sort_chain_graph_abs(child, min_hit);
  387. if (callchain_cumul_hits(child) >= min_hit)
  388. rb_insert_callchain(&node->rb_root, child,
  389. CHAIN_GRAPH_ABS);
  390. }
  391. }
  392. static void
  393. sort_chain_graph_abs(struct rb_root *rb_root, struct callchain_root *chain_root,
  394. u64 min_hit, struct callchain_param *param __maybe_unused)
  395. {
  396. __sort_chain_graph_abs(&chain_root->node, min_hit);
  397. rb_root->rb_node = chain_root->node.rb_root.rb_node;
  398. }
  399. static void __sort_chain_graph_rel(struct callchain_node *node,
  400. double min_percent)
  401. {
  402. struct rb_node *n;
  403. struct callchain_node *child;
  404. u64 min_hit;
  405. node->rb_root = RB_ROOT;
  406. min_hit = ceil(node->children_hit * min_percent);
  407. n = rb_first(&node->rb_root_in);
  408. while (n) {
  409. child = rb_entry(n, struct callchain_node, rb_node_in);
  410. n = rb_next(n);
  411. __sort_chain_graph_rel(child, min_percent);
  412. if (callchain_cumul_hits(child) >= min_hit)
  413. rb_insert_callchain(&node->rb_root, child,
  414. CHAIN_GRAPH_REL);
  415. }
  416. }
  417. static void
  418. sort_chain_graph_rel(struct rb_root *rb_root, struct callchain_root *chain_root,
  419. u64 min_hit __maybe_unused, struct callchain_param *param)
  420. {
  421. __sort_chain_graph_rel(&chain_root->node, param->min_percent / 100.0);
  422. rb_root->rb_node = chain_root->node.rb_root.rb_node;
  423. }
  424. int callchain_register_param(struct callchain_param *param)
  425. {
  426. switch (param->mode) {
  427. case CHAIN_GRAPH_ABS:
  428. param->sort = sort_chain_graph_abs;
  429. break;
  430. case CHAIN_GRAPH_REL:
  431. param->sort = sort_chain_graph_rel;
  432. break;
  433. case CHAIN_FLAT:
  434. case CHAIN_FOLDED:
  435. param->sort = sort_chain_flat;
  436. break;
  437. case CHAIN_NONE:
  438. default:
  439. return -1;
  440. }
  441. return 0;
  442. }
  443. /*
  444. * Create a child for a parent. If inherit_children, then the new child
  445. * will become the new parent of its parent's children
  446. */
  447. static struct callchain_node *
  448. create_child(struct callchain_node *parent, bool inherit_children)
  449. {
  450. struct callchain_node *new;
  451. new = zalloc(sizeof(*new));
  452. if (!new) {
  453. perror("not enough memory to create child for code path tree");
  454. return NULL;
  455. }
  456. new->parent = parent;
  457. INIT_LIST_HEAD(&new->val);
  458. INIT_LIST_HEAD(&new->parent_val);
  459. if (inherit_children) {
  460. struct rb_node *n;
  461. struct callchain_node *child;
  462. new->rb_root_in = parent->rb_root_in;
  463. parent->rb_root_in = RB_ROOT;
  464. n = rb_first(&new->rb_root_in);
  465. while (n) {
  466. child = rb_entry(n, struct callchain_node, rb_node_in);
  467. child->parent = new;
  468. n = rb_next(n);
  469. }
  470. /* make it the first child */
  471. rb_link_node(&new->rb_node_in, NULL, &parent->rb_root_in.rb_node);
  472. rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
  473. }
  474. return new;
  475. }
  476. /*
  477. * Fill the node with callchain values
  478. */
  479. static int
  480. fill_node(struct callchain_node *node, struct callchain_cursor *cursor)
  481. {
  482. struct callchain_cursor_node *cursor_node;
  483. node->val_nr = cursor->nr - cursor->pos;
  484. if (!node->val_nr)
  485. pr_warning("Warning: empty node in callchain tree\n");
  486. cursor_node = callchain_cursor_current(cursor);
  487. while (cursor_node) {
  488. struct callchain_list *call;
  489. call = zalloc(sizeof(*call));
  490. if (!call) {
  491. perror("not enough memory for the code path tree");
  492. return -1;
  493. }
  494. call->ip = cursor_node->ip;
  495. call->ms.sym = cursor_node->sym;
  496. call->ms.map = map__get(cursor_node->map);
  497. call->srcline = cursor_node->srcline;
  498. if (cursor_node->branch) {
  499. call->branch_count = 1;
  500. if (cursor_node->branch_from) {
  501. /*
  502. * branch_from is set elsewhere, which implies this
  503. * entry is the "to" end of a branch.
  504. */
  505. call->brtype_stat.branch_to = true;
  506. if (cursor_node->branch_flags.predicted)
  507. call->predicted_count = 1;
  508. if (cursor_node->branch_flags.abort)
  509. call->abort_count = 1;
  510. branch_type_count(&call->brtype_stat,
  511. &cursor_node->branch_flags,
  512. cursor_node->branch_from,
  513. cursor_node->ip);
  514. } else {
  515. /*
  516. * It's "from" of a branch
  517. */
  518. call->brtype_stat.branch_to = false;
  519. call->cycles_count =
  520. cursor_node->branch_flags.cycles;
  521. call->iter_count = cursor_node->nr_loop_iter;
  522. call->iter_cycles = cursor_node->iter_cycles;
  523. }
  524. }
  525. list_add_tail(&call->list, &node->val);
  526. callchain_cursor_advance(cursor);
  527. cursor_node = callchain_cursor_current(cursor);
  528. }
  529. return 0;
  530. }
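/*
 * Create a new child of @parent and fill it with the remaining cursor
 * entries; on failure the partially filled value list is freed.
 */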
  531. static struct callchain_node *
  532. add_child(struct callchain_node *parent,
  533. struct callchain_cursor *cursor,
  534. u64 period)
  535. {
  536. struct callchain_node *new;
  537. new = create_child(parent, false);
  538. if (new == NULL)
  539. return NULL;
  540. if (fill_node(new, cursor) < 0) {
  541. struct callchain_list *call, *tmp;
  542. list_for_each_entry_safe(call, tmp, &new->val, list) {
  543. list_del(&call->list);
  544. map__zput(call->ms.map);
  545. free(call);
  546. }
  547. free(new);
  548. return NULL;
  549. }
  550. new->children_hit = 0;
  551. new->hit = period;
  552. new->children_count = 0;
  553. new->count = 1;
  554. return new;
  555. }
  556. enum match_result {
  557. MATCH_ERROR = -1,
  558. MATCH_EQ,
  559. MATCH_LT,
  560. MATCH_GT,
  561. };
  562. static enum match_result match_chain_strings(const char *left,
  563. const char *right)
  564. {
  565. enum match_result ret = MATCH_EQ;
  566. int cmp;
  567. if (left && right)
  568. cmp = strcmp(left, right);
  569. else if (!left && right)
  570. cmp = 1;
  571. else if (left && !right)
  572. cmp = -1;
  573. else
  574. return MATCH_ERROR;
  575. if (cmp != 0)
  576. ret = cmp < 0 ? MATCH_LT : MATCH_GT;
  577. return ret;
  578. }
  579. /*
  580. * We need to always use relative addresses because we're aggregating
  581. * callchains from multiple threads, i.e. different address spaces, so
  582. * comparing absolute addresses makes no sense as a symbol in a DSO may end up
  583. * at a different address when used in a different binary or even the same
  584. * binary but with some sort of address randomization technique, thus we need
  585. * to compare just relative addresses. -acme
  586. */
  587. static enum match_result match_chain_dso_addresses(struct map *left_map, u64 left_ip,
  588. struct map *right_map, u64 right_ip)
  589. {
  590. struct dso *left_dso = left_map ? left_map->dso : NULL;
  591. struct dso *right_dso = right_map ? right_map->dso : NULL;
  592. if (left_dso != right_dso)
  593. return left_dso < right_dso ? MATCH_LT : MATCH_GT;
  594. if (left_ip != right_ip)
  595. return left_ip < right_ip ? MATCH_LT : MATCH_GT;
  596. return MATCH_EQ;
  597. }
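/*
 * Compare a cursor node against a callchain_list entry according to
 * callchain_param.key: srcline first, then function (symbol name for
 * inlined frames, symbol start address otherwise), falling back to
 * DSO + address. On a match, any branch statistics carried by the
 * cursor node are accumulated into the entry.
 */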
  598. static enum match_result match_chain(struct callchain_cursor_node *node,
  599. struct callchain_list *cnode)
  600. {
  601. enum match_result match = MATCH_ERROR;
  602. switch (callchain_param.key) {
  603. case CCKEY_SRCLINE:
  604. match = match_chain_strings(cnode->srcline, node->srcline);
  605. if (match != MATCH_ERROR)
  606. break;
  607. /* otherwise fall-back to symbol-based comparison below */
  608. __fallthrough;
  609. case CCKEY_FUNCTION:
  610. if (node->sym && cnode->ms.sym) {
  611. /*
  612. * Compare inlined frames based on their symbol name
  613. * because different inlined frames will have the same
  614. * symbol start. Otherwise do a faster comparison based
  615. * on the symbol start address.
  616. */
  617. if (cnode->ms.sym->inlined || node->sym->inlined) {
  618. match = match_chain_strings(cnode->ms.sym->name,
  619. node->sym->name);
  620. if (match != MATCH_ERROR)
  621. break;
  622. } else {
  623. match = match_chain_dso_addresses(cnode->ms.map, cnode->ms.sym->start,
  624. node->map, node->sym->start);
  625. break;
  626. }
  627. }
  628. /* otherwise fall-back to IP-based comparison below */
  629. __fallthrough;
  630. case CCKEY_ADDRESS:
  631. default:
  632. match = match_chain_dso_addresses(cnode->ms.map, cnode->ip, node->map, node->ip);
  633. break;
  634. }
  635. if (match == MATCH_EQ && node->branch) {
  636. cnode->branch_count++;
  637. if (node->branch_from) {
  638. /*
  639. * It's "to" of a branch
  640. */
  641. cnode->brtype_stat.branch_to = true;
  642. if (node->branch_flags.predicted)
  643. cnode->predicted_count++;
  644. if (node->branch_flags.abort)
  645. cnode->abort_count++;
  646. branch_type_count(&cnode->brtype_stat,
  647. &node->branch_flags,
  648. node->branch_from,
  649. node->ip);
  650. } else {
  651. /*
  652. * It's "from" of a branch
  653. */
  654. cnode->brtype_stat.branch_to = false;
  655. cnode->cycles_count += node->branch_flags.cycles;
  656. cnode->iter_count += node->nr_loop_iter;
  657. cnode->iter_cycles += node->iter_cycles;
  658. cnode->from_count++;
  659. }
  660. }
  661. return match;
  662. }
  663. /*
  664. * Split the parent in two parts (a new child is created) and
  665. * give a part of its callchain to the created child.
  666. * Then create another child to host the given callchain of the new branch.
  667. */
  668. static int
  669. split_add_child(struct callchain_node *parent,
  670. struct callchain_cursor *cursor,
  671. struct callchain_list *to_split,
  672. u64 idx_parents, u64 idx_local, u64 period)
  673. {
  674. struct callchain_node *new;
  675. struct list_head *old_tail;
  676. unsigned int idx_total = idx_parents + idx_local;
  677. /* split */
  678. new = create_child(parent, true);
  679. if (new == NULL)
  680. return -1;
  681. /* split the callchain and move a part to the new child */
  682. old_tail = parent->val.prev;
  683. list_del_range(&to_split->list, old_tail);
  684. new->val.next = &to_split->list;
  685. new->val.prev = old_tail;
  686. to_split->list.prev = &new->val;
  687. old_tail->next = &new->val;
  688. /* split the hits */
  689. new->hit = parent->hit;
  690. new->children_hit = parent->children_hit;
  691. parent->children_hit = callchain_cumul_hits(new);
  692. new->val_nr = parent->val_nr - idx_local;
  693. parent->val_nr = idx_local;
  694. new->count = parent->count;
  695. new->children_count = parent->children_count;
  696. parent->children_count = callchain_cumul_counts(new);
  697. /* create a new child for the new branch if any */
  698. if (idx_total < cursor->nr) {
  699. struct callchain_node *first;
  700. struct callchain_list *cnode;
  701. struct callchain_cursor_node *node;
  702. struct rb_node *p, **pp;
  703. parent->hit = 0;
  704. parent->children_hit += period;
  705. parent->count = 0;
  706. parent->children_count += 1;
  707. node = callchain_cursor_current(cursor);
  708. new = add_child(parent, cursor, period);
  709. if (new == NULL)
  710. return -1;
  711. /*
  712. * This is the second child, since we moved the parent's children
  713. * to the new (first) child above.
  714. */
  715. p = parent->rb_root_in.rb_node;
  716. first = rb_entry(p, struct callchain_node, rb_node_in);
  717. cnode = list_first_entry(&first->val, struct callchain_list,
  718. list);
  719. if (match_chain(node, cnode) == MATCH_LT)
  720. pp = &p->rb_left;
  721. else
  722. pp = &p->rb_right;
  723. rb_link_node(&new->rb_node_in, p, pp);
  724. rb_insert_color(&new->rb_node_in, &parent->rb_root_in);
  725. } else {
  726. parent->hit = period;
  727. parent->count = 1;
  728. }
  729. return 0;
  730. }
  731. static enum match_result
  732. append_chain(struct callchain_node *root,
  733. struct callchain_cursor *cursor,
  734. u64 period);
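/*
 * Append the cursor to one of @root's children: walk the children rbtree,
 * let append_chain() decide whether a child matches, create a new child
 * when nothing matches, and account @period into @root's cumulative hits.
 */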
  735. static int
  736. append_chain_children(struct callchain_node *root,
  737. struct callchain_cursor *cursor,
  738. u64 period)
  739. {
  740. struct callchain_node *rnode;
  741. struct callchain_cursor_node *node;
  742. struct rb_node **p = &root->rb_root_in.rb_node;
  743. struct rb_node *parent = NULL;
  744. node = callchain_cursor_current(cursor);
  745. if (!node)
  746. return -1;
  747. /* lookup in children */
  748. while (*p) {
  749. enum match_result ret;
  750. parent = *p;
  751. rnode = rb_entry(parent, struct callchain_node, rb_node_in);
  752. /* If at least the first entry matches, rely on the children */
  753. ret = append_chain(rnode, cursor, period);
  754. if (ret == MATCH_EQ)
  755. goto inc_children_hit;
  756. if (ret == MATCH_ERROR)
  757. return -1;
  758. if (ret == MATCH_LT)
  759. p = &parent->rb_left;
  760. else
  761. p = &parent->rb_right;
  762. }
  763. /* nothing in children, add to the current node */
  764. rnode = add_child(root, cursor, period);
  765. if (rnode == NULL)
  766. return -1;
  767. rb_link_node(&rnode->rb_node_in, parent, p);
  768. rb_insert_color(&rnode->rb_node_in, &root->rb_root_in);
  769. inc_children_hit:
  770. root->children_hit += period;
  771. root->children_count++;
  772. return 0;
  773. }
  774. static enum match_result
  775. append_chain(struct callchain_node *root,
  776. struct callchain_cursor *cursor,
  777. u64 period)
  778. {
  779. struct callchain_list *cnode;
  780. u64 start = cursor->pos;
  781. bool found = false;
  782. u64 matches;
  783. enum match_result cmp = MATCH_ERROR;
  784. /*
  785. * Lookup in the current node
  786. * If we have a symbol, then compare the start to match
  787. * anywhere inside a function, unless function
  788. * mode is disabled.
  789. */
  790. list_for_each_entry(cnode, &root->val, list) {
  791. struct callchain_cursor_node *node;
  792. node = callchain_cursor_current(cursor);
  793. if (!node)
  794. break;
  795. cmp = match_chain(node, cnode);
  796. if (cmp != MATCH_EQ)
  797. break;
  798. found = true;
  799. callchain_cursor_advance(cursor);
  800. }
  801. /* no match, relay the result to the parent */
  802. if (!found) {
  803. WARN_ONCE(cmp == MATCH_ERROR, "Chain comparison error\n");
  804. return cmp;
  805. }
  806. matches = cursor->pos - start;
  807. /* we match only a part of the node. Split it and add the new chain */
  808. if (matches < root->val_nr) {
  809. if (split_add_child(root, cursor, cnode, start, matches,
  810. period) < 0)
  811. return MATCH_ERROR;
  812. return MATCH_EQ;
  813. }
  814. /* we match 100% of the path, increment the hit */
  815. if (matches == root->val_nr && cursor->pos == cursor->nr) {
  816. root->hit += period;
  817. root->count++;
  818. return MATCH_EQ;
  819. }
  820. /* We match the node and still have a part remaining */
  821. if (append_chain_children(root, cursor, period) < 0)
  822. return MATCH_ERROR;
  823. return MATCH_EQ;
  824. }
  825. int callchain_append(struct callchain_root *root,
  826. struct callchain_cursor *cursor,
  827. u64 period)
  828. {
  829. if (!cursor->nr)
  830. return 0;
  831. callchain_cursor_commit(cursor);
  832. if (append_chain_children(&root->node, cursor, period) < 0)
  833. return -1;
  834. if (cursor->nr > root->max_depth)
  835. root->max_depth = cursor->nr;
  836. return 0;
  837. }
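/*
 * Replay @src's value list through the cursor and append it to @dst, then
 * recurse into (and free) @src's children. The cursor position is restored
 * before returning.
 */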
  838. static int
  839. merge_chain_branch(struct callchain_cursor *cursor,
  840. struct callchain_node *dst, struct callchain_node *src)
  841. {
  842. struct callchain_cursor_node **old_last = cursor->last;
  843. struct callchain_node *child;
  844. struct callchain_list *list, *next_list;
  845. struct rb_node *n;
  846. int old_pos = cursor->nr;
  847. int err = 0;
  848. list_for_each_entry_safe(list, next_list, &src->val, list) {
  849. callchain_cursor_append(cursor, list->ip,
  850. list->ms.map, list->ms.sym,
  851. false, NULL, 0, 0, 0, list->srcline);
  852. list_del(&list->list);
  853. map__zput(list->ms.map);
  854. free(list);
  855. }
  856. if (src->hit) {
  857. callchain_cursor_commit(cursor);
  858. if (append_chain_children(dst, cursor, src->hit) < 0)
  859. return -1;
  860. }
  861. n = rb_first(&src->rb_root_in);
  862. while (n) {
  863. child = container_of(n, struct callchain_node, rb_node_in);
  864. n = rb_next(n);
  865. rb_erase(&child->rb_node_in, &src->rb_root_in);
  866. err = merge_chain_branch(cursor, dst, child);
  867. if (err)
  868. break;
  869. free(child);
  870. }
  871. cursor->nr = old_pos;
  872. cursor->last = old_last;
  873. return err;
  874. }
  875. int callchain_merge(struct callchain_cursor *cursor,
  876. struct callchain_root *dst, struct callchain_root *src)
  877. {
  878. return merge_chain_branch(cursor, &dst->node, &src->node);
  879. }
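/*
 * Append one entry to the cursor, reusing a previously allocated node
 * when one is available at the current position.
 */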
  880. int callchain_cursor_append(struct callchain_cursor *cursor,
  881. u64 ip, struct map *map, struct symbol *sym,
  882. bool branch, struct branch_flags *flags,
  883. int nr_loop_iter, u64 iter_cycles, u64 branch_from,
  884. const char *srcline)
  885. {
  886. struct callchain_cursor_node *node = *cursor->last;
  887. if (!node) {
  888. node = calloc(1, sizeof(*node));
  889. if (!node)
  890. return -ENOMEM;
  891. *cursor->last = node;
  892. }
  893. node->ip = ip;
  894. map__zput(node->map);
  895. node->map = map__get(map);
  896. node->sym = sym;
  897. node->branch = branch;
  898. node->nr_loop_iter = nr_loop_iter;
  899. node->iter_cycles = iter_cycles;
  900. node->srcline = srcline;
  901. if (flags)
  902. memcpy(&node->branch_flags, flags,
  903. sizeof(struct branch_flags));
  904. node->branch_from = branch_from;
  905. cursor->nr++;
  906. cursor->last = &node->next;
  907. return 0;
  908. }
  909. int sample__resolve_callchain(struct perf_sample *sample,
  910. struct callchain_cursor *cursor, struct symbol **parent,
  911. struct perf_evsel *evsel, struct addr_location *al,
  912. int max_stack)
  913. {
  914. if (sample->callchain == NULL && !symbol_conf.show_branchflag_count)
  915. return 0;
  916. if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain ||
  917. perf_hpp_list.parent || symbol_conf.show_branchflag_count) {
  918. return thread__resolve_callchain(al->thread, cursor, evsel, sample,
  919. parent, al, max_stack);
  920. }
  921. return 0;
  922. }
  923. int hist_entry__append_callchain(struct hist_entry *he, struct perf_sample *sample)
  924. {
  925. if ((!symbol_conf.use_callchain || sample->callchain == NULL) &&
  926. !symbol_conf.show_branchflag_count)
  927. return 0;
  928. return callchain_append(he->callchain, &callchain_cursor, sample->period);
  929. }
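/*
 * Resolve a cursor node into @al and set its cpumode/level; returns 0
 * when the entry is unresolved and should be hidden, 1 otherwise.
 */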
  930. int fill_callchain_info(struct addr_location *al, struct callchain_cursor_node *node,
  931. bool hide_unresolved)
  932. {
  933. al->map = node->map;
  934. al->sym = node->sym;
  935. al->srcline = node->srcline;
  936. al->addr = node->ip;
  937. if (al->sym == NULL) {
  938. if (hide_unresolved)
  939. return 0;
  940. if (al->map == NULL)
  941. goto out;
  942. }
  943. if (al->map->groups == &al->machine->kmaps) {
  944. if (machine__is_host(al->machine)) {
  945. al->cpumode = PERF_RECORD_MISC_KERNEL;
  946. al->level = 'k';
  947. } else {
  948. al->cpumode = PERF_RECORD_MISC_GUEST_KERNEL;
  949. al->level = 'g';
  950. }
  951. } else {
  952. if (machine__is_host(al->machine)) {
  953. al->cpumode = PERF_RECORD_MISC_USER;
  954. al->level = '.';
  955. } else if (perf_guest) {
  956. al->cpumode = PERF_RECORD_MISC_GUEST_USER;
  957. al->level = 'u';
  958. } else {
  959. al->cpumode = PERF_RECORD_MISC_HYPERVISOR;
  960. al->level = 'H';
  961. }
  962. }
  963. out:
  964. return 1;
  965. }
  966. char *callchain_list__sym_name(struct callchain_list *cl,
  967. char *bf, size_t bfsize, bool show_dso)
  968. {
  969. bool show_addr = callchain_param.key == CCKEY_ADDRESS;
  970. bool show_srcline = show_addr || callchain_param.key == CCKEY_SRCLINE;
  971. int printed;
  972. if (cl->ms.sym) {
  973. const char *inlined = cl->ms.sym->inlined ? " (inlined)" : "";
  974. if (show_srcline && cl->srcline)
  975. printed = scnprintf(bf, bfsize, "%s %s%s",
  976. cl->ms.sym->name, cl->srcline,
  977. inlined);
  978. else
  979. printed = scnprintf(bf, bfsize, "%s%s",
  980. cl->ms.sym->name, inlined);
  981. } else
  982. printed = scnprintf(bf, bfsize, "%#" PRIx64, cl->ip);
  983. if (show_dso)
  984. scnprintf(bf + printed, bfsize - printed, " %s",
  985. cl->ms.map ?
  986. cl->ms.map->dso->short_name :
  987. "unknown");
  988. return bf;
  989. }
  990. char *callchain_node__scnprintf_value(struct callchain_node *node,
  991. char *bf, size_t bfsize, u64 total)
  992. {
  993. double percent = 0.0;
  994. u64 period = callchain_cumul_hits(node);
  995. unsigned count = callchain_cumul_counts(node);
  996. if (callchain_param.mode == CHAIN_FOLDED) {
  997. period = node->hit;
  998. count = node->count;
  999. }
  1000. switch (callchain_param.value) {
  1001. case CCVAL_PERIOD:
  1002. scnprintf(bf, bfsize, "%"PRIu64, period);
  1003. break;
  1004. case CCVAL_COUNT:
  1005. scnprintf(bf, bfsize, "%u", count);
  1006. break;
  1007. case CCVAL_PERCENT:
  1008. default:
  1009. if (total)
  1010. percent = period * 100.0 / total;
  1011. scnprintf(bf, bfsize, "%.2f%%", percent);
  1012. break;
  1013. }
  1014. return bf;
  1015. }
  1016. int callchain_node__fprintf_value(struct callchain_node *node,
  1017. FILE *fp, u64 total)
  1018. {
  1019. double percent = 0.0;
  1020. u64 period = callchain_cumul_hits(node);
  1021. unsigned count = callchain_cumul_counts(node);
  1022. if (callchain_param.mode == CHAIN_FOLDED) {
  1023. period = node->hit;
  1024. count = node->count;
  1025. }
  1026. switch (callchain_param.value) {
  1027. case CCVAL_PERIOD:
  1028. return fprintf(fp, "%"PRIu64, period);
  1029. case CCVAL_COUNT:
  1030. return fprintf(fp, "%u", count);
  1031. case CCVAL_PERCENT:
  1032. default:
  1033. if (total)
  1034. percent = period * 100.0 / total;
  1035. return percent_color_fprintf(fp, "%.2f%%", percent);
  1036. }
  1037. return 0;
  1038. }
  1039. static void callchain_counts_value(struct callchain_node *node,
  1040. u64 *branch_count, u64 *predicted_count,
  1041. u64 *abort_count, u64 *cycles_count)
  1042. {
  1043. struct callchain_list *clist;
  1044. list_for_each_entry(clist, &node->val, list) {
  1045. if (branch_count)
  1046. *branch_count += clist->branch_count;
  1047. if (predicted_count)
  1048. *predicted_count += clist->predicted_count;
  1049. if (abort_count)
  1050. *abort_count += clist->abort_count;
  1051. if (cycles_count)
  1052. *cycles_count += clist->cycles_count;
  1053. }
  1054. }
  1055. static int callchain_node_branch_counts_cumul(struct callchain_node *node,
  1056. u64 *branch_count,
  1057. u64 *predicted_count,
  1058. u64 *abort_count,
  1059. u64 *cycles_count)
  1060. {
  1061. struct callchain_node *child;
  1062. struct rb_node *n;
  1063. n = rb_first(&node->rb_root_in);
  1064. while (n) {
  1065. child = rb_entry(n, struct callchain_node, rb_node_in);
  1066. n = rb_next(n);
  1067. callchain_node_branch_counts_cumul(child, branch_count,
  1068. predicted_count,
  1069. abort_count,
  1070. cycles_count);
  1071. callchain_counts_value(child, branch_count,
  1072. predicted_count, abort_count,
  1073. cycles_count);
  1074. }
  1075. return 0;
  1076. }
  1077. int callchain_branch_counts(struct callchain_root *root,
  1078. u64 *branch_count, u64 *predicted_count,
  1079. u64 *abort_count, u64 *cycles_count)
  1080. {
  1081. if (branch_count)
  1082. *branch_count = 0;
  1083. if (predicted_count)
  1084. *predicted_count = 0;
  1085. if (abort_count)
  1086. *abort_count = 0;
  1087. if (cycles_count)
  1088. *cycles_count = 0;
  1089. return callchain_node_branch_counts_cumul(&root->node,
  1090. branch_count,
  1091. predicted_count,
  1092. abort_count,
  1093. cycles_count);
  1094. }
  1095. static int count_pri64_printf(int idx, const char *str, u64 value, char *bf, int bfsize)
  1096. {
  1097. int printed;
  1098. printed = scnprintf(bf, bfsize, "%s%s:%" PRIu64 "", (idx) ? " " : " (", str, value);
  1099. return printed;
  1100. }
  1101. static int count_float_printf(int idx, const char *str, float value,
  1102. char *bf, int bfsize, float threshold)
  1103. {
  1104. int printed;
  1105. if (threshold != 0.0 && value < threshold)
  1106. return 0;
  1107. printed = scnprintf(bf, bfsize, "%s%s:%.1f%%", (idx) ? " " : " (", str, value);
  1108. return printed;
  1109. }
  1110. static int branch_to_str(char *bf, int bfsize,
  1111. u64 branch_count, u64 predicted_count,
  1112. u64 abort_count,
  1113. struct branch_type_stat *brtype_stat)
  1114. {
  1115. int printed, i = 0;
  1116. printed = branch_type_str(brtype_stat, bf, bfsize);
  1117. if (printed)
  1118. i++;
  1119. if (predicted_count < branch_count) {
  1120. printed += count_float_printf(i++, "predicted",
  1121. predicted_count * 100.0 / branch_count,
  1122. bf + printed, bfsize - printed, 0.0);
  1123. }
  1124. if (abort_count) {
  1125. printed += count_float_printf(i++, "abort",
  1126. abort_count * 100.0 / branch_count,
  1127. bf + printed, bfsize - printed, 0.1);
  1128. }
  1129. if (i)
  1130. printed += scnprintf(bf + printed, bfsize - printed, ")");
  1131. return printed;
  1132. }
  1133. static int branch_from_str(char *bf, int bfsize,
  1134. u64 branch_count,
  1135. u64 cycles_count, u64 iter_count,
  1136. u64 iter_cycles, u64 from_count)
  1137. {
  1138. int printed = 0, i = 0;
  1139. u64 cycles, v = 0;
  1140. cycles = cycles_count / branch_count;
  1141. if (cycles) {
  1142. printed += count_pri64_printf(i++, "cycles",
  1143. cycles,
  1144. bf + printed, bfsize - printed);
  1145. }
  1146. if (iter_count && from_count) {
  1147. v = iter_count / from_count;
  1148. if (v) {
  1149. printed += count_pri64_printf(i++, "iter",
  1150. v, bf + printed, bfsize - printed);
  1151. printed += count_pri64_printf(i++, "avg_cycles",
  1152. iter_cycles / iter_count,
  1153. bf + printed, bfsize - printed);
  1154. }
  1155. }
  1156. if (i)
  1157. printed += scnprintf(bf + printed, bfsize - printed, ")");
  1158. return printed;
  1159. }
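/*
 * Format the branch counters: entries seen as branch targets show branch
 * type, prediction and abort percentages; entries seen as branch sources
 * show cycles and loop iteration statistics; with no branch count only
 * "(calltrace)" is printed.
 */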
  1160. static int counts_str_build(char *bf, int bfsize,
  1161. u64 branch_count, u64 predicted_count,
  1162. u64 abort_count, u64 cycles_count,
  1163. u64 iter_count, u64 iter_cycles,
  1164. u64 from_count,
  1165. struct branch_type_stat *brtype_stat)
  1166. {
  1167. int printed;
  1168. if (branch_count == 0)
  1169. return scnprintf(bf, bfsize, " (calltrace)");
  1170. if (brtype_stat->branch_to) {
  1171. printed = branch_to_str(bf, bfsize, branch_count,
  1172. predicted_count, abort_count, brtype_stat);
  1173. } else {
  1174. printed = branch_from_str(bf, bfsize, branch_count,
  1175. cycles_count, iter_count, iter_cycles,
  1176. from_count);
  1177. }
  1178. if (!printed)
  1179. bf[0] = 0;
  1180. return printed;
  1181. }
  1182. static int callchain_counts_printf(FILE *fp, char *bf, int bfsize,
  1183. u64 branch_count, u64 predicted_count,
  1184. u64 abort_count, u64 cycles_count,
  1185. u64 iter_count, u64 iter_cycles,
  1186. u64 from_count,
  1187. struct branch_type_stat *brtype_stat)
  1188. {
  1189. char str[256];
  1190. counts_str_build(str, sizeof(str), branch_count,
  1191. predicted_count, abort_count, cycles_count,
  1192. iter_count, iter_cycles, from_count, brtype_stat);
  1193. if (fp)
  1194. return fprintf(fp, "%s", str);
  1195. return scnprintf(bf, bfsize, "%s", str);
  1196. }
  1197. int callchain_list_counts__printf_value(struct callchain_list *clist,
  1198. FILE *fp, char *bf, int bfsize)
  1199. {
  1200. u64 branch_count, predicted_count;
  1201. u64 abort_count, cycles_count;
  1202. u64 iter_count, iter_cycles;
  1203. u64 from_count;
  1204. branch_count = clist->branch_count;
  1205. predicted_count = clist->predicted_count;
  1206. abort_count = clist->abort_count;
  1207. cycles_count = clist->cycles_count;
  1208. iter_count = clist->iter_count;
  1209. iter_cycles = clist->iter_cycles;
  1210. from_count = clist->from_count;
  1211. return callchain_counts_printf(fp, bf, bfsize, branch_count,
  1212. predicted_count, abort_count,
  1213. cycles_count, iter_count, iter_cycles,
  1214. from_count, &clist->brtype_stat);
  1215. }
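/* Free a node's parent_val and val lists and recursively free all of its children. */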
  1216. static void free_callchain_node(struct callchain_node *node)
  1217. {
  1218. struct callchain_list *list, *tmp;
  1219. struct callchain_node *child;
  1220. struct rb_node *n;
  1221. list_for_each_entry_safe(list, tmp, &node->parent_val, list) {
  1222. list_del(&list->list);
  1223. map__zput(list->ms.map);
  1224. free(list);
  1225. }
  1226. list_for_each_entry_safe(list, tmp, &node->val, list) {
  1227. list_del(&list->list);
  1228. map__zput(list->ms.map);
  1229. free(list);
  1230. }
  1231. n = rb_first(&node->rb_root_in);
  1232. while (n) {
  1233. child = container_of(n, struct callchain_node, rb_node_in);
  1234. n = rb_next(n);
  1235. rb_erase(&child->rb_node_in, &node->rb_root_in);
  1236. free_callchain_node(child);
  1237. free(child);
  1238. }
  1239. }
  1240. void free_callchain(struct callchain_root *root)
  1241. {
  1242. if (!symbol_conf.use_callchain)
  1243. return;
  1244. free_callchain_node(&root->node);
  1245. }
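/* Decay each node's hit count to 7/8 of its value and recompute children_hit from the decayed children. */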
  1246. static u64 decay_callchain_node(struct callchain_node *node)
  1247. {
  1248. struct callchain_node *child;
  1249. struct rb_node *n;
  1250. u64 child_hits = 0;
  1251. n = rb_first(&node->rb_root_in);
  1252. while (n) {
  1253. child = container_of(n, struct callchain_node, rb_node_in);
  1254. child_hits += decay_callchain_node(child);
  1255. n = rb_next(n);
  1256. }
  1257. node->hit = (node->hit * 7) / 8;
  1258. node->children_hit = child_hits;
  1259. return node->hit;
  1260. }
  1261. void decay_callchain(struct callchain_root *root)
  1262. {
  1263. if (!symbol_conf.use_callchain)
  1264. return;
  1265. decay_callchain_node(&root->node);
  1266. }
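/* Collect copies of the callchain values of all of @node's ancestors into @node->parent_val. */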
  1267. int callchain_node__make_parent_list(struct callchain_node *node)
  1268. {
  1269. struct callchain_node *parent = node->parent;
  1270. struct callchain_list *chain, *new;
  1271. LIST_HEAD(head);
  1272. while (parent) {
  1273. list_for_each_entry_reverse(chain, &parent->val, list) {
  1274. new = malloc(sizeof(*new));
  1275. if (new == NULL)
  1276. goto out;
  1277. *new = *chain;
  1278. new->has_children = false;
  1279. map__get(new->ms.map);
  1280. list_add_tail(&new->list, &head);
  1281. }
  1282. parent = parent->parent;
  1283. }
  1284. list_for_each_entry_safe_reverse(chain, new, &head, list)
  1285. list_move_tail(&chain->list, &node->parent_val);
  1286. if (!list_empty(&node->parent_val)) {
  1287. chain = list_first_entry(&node->parent_val, struct callchain_list, list);
  1288. chain->has_children = rb_prev(&node->rb_node) || rb_next(&node->rb_node);
  1289. chain = list_first_entry(&node->val, struct callchain_list, list);
  1290. chain->has_children = false;
  1291. }
  1292. return 0;
  1293. out:
  1294. list_for_each_entry_safe(chain, new, &head, list) {
  1295. list_del(&chain->list);
  1296. map__zput(chain->ms.map);
  1297. free(chain);
  1298. }
  1299. return -ENOMEM;
  1300. }
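/* Copy every node of @src into @dst via callchain_cursor_append(), committing @src first. */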
  1301. int callchain_cursor__copy(struct callchain_cursor *dst,
  1302. struct callchain_cursor *src)
  1303. {
  1304. int rc = 0;
  1305. callchain_cursor_reset(dst);
  1306. callchain_cursor_commit(src);
  1307. while (true) {
  1308. struct callchain_cursor_node *node;
  1309. node = callchain_cursor_current(src);
  1310. if (node == NULL)
  1311. break;
  1312. rc = callchain_cursor_append(dst, node->ip, node->map, node->sym,
  1313. node->branch, &node->branch_flags,
  1314. node->nr_loop_iter,
  1315. node->iter_cycles,
  1316. node->branch_from, node->srcline);
  1317. if (rc)
  1318. break;
  1319. callchain_cursor_advance(src);
  1320. }
  1321. return rc;
  1322. }