top.c

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Refactored from builtin-top.c, see that file for further copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "cpumap.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "parse-events.h"
#include "symbol.h"
#include "top.h"
#include <inttypes.h>

/*
 * Ordering weight: count-1 * count-2 * ... / count-n
 */
static double sym_weight(const struct sym_entry *sym, struct perf_top *top)
{
	double weight = sym->snap_count;
	int counter;

	if (!top->display_weighted)
		return weight;

	for (counter = 1; counter < top->evlist->nr_entries - 1; counter++)
		weight *= sym->count[counter];

	weight /= (sym->count[counter] + 1);

	return weight;
}

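/*
 * Worked example for sym_weight() above (illustrative numbers, not taken
 * from the source): with three counters and display_weighted set, a symbol
 * with snap_count = 100, count[1] = 4 and count[2] = 9 gets
 * weight = 100 * 4 / (9 + 1) = 40. The loop stops at nr_entries - 1, so the
 * last counter only appears in the divisor, matching the
 * "count-1 * count-2 * ... / count-n" comment.
 */
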
static void perf_top__remove_active_sym(struct perf_top *top, struct sym_entry *syme)
{
	pthread_mutex_lock(&top->active_symbols_lock);
	list_del_init(&syme->node);
	pthread_mutex_unlock(&top->active_symbols_lock);
}

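/*
 * Insertion below keeps heavier symbols to the left, so an in-order walk
 * of the tree (rb_first()/rb_next(), as done in perf_top__find_widths()
 * further down) visits entries in descending weight order, i.e. hottest
 * symbols first.
 */
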
static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
{
	struct rb_node **p = &tree->rb_node;
	struct rb_node *parent = NULL;
	struct sym_entry *iter;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct sym_entry, rb_node);

		if (se->weight > iter->weight)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&se->rb_node, parent, p);
	rb_insert_color(&se->rb_node, tree);
}

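/*
 * snprintf() returns the number of bytes that *would* have been written,
 * which can exceed the buffer size on truncation. The wrapper below clamps
 * the result to 'size' so that the running 'ret' offset used as
 * 'bf + ret' / 'size - ret' in perf_top__header_snprintf() never walks past
 * the end of the buffer.
 */
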
#define SNPRINTF(buf, size, fmt, args...) \
({ \
	size_t r = snprintf(buf, size, fmt, ## args); \
	r > size ? size : r; \
})

size_t perf_top__header_snprintf(struct perf_top *top, char *bf, size_t size)
{
	struct perf_evsel *counter;
	float samples_per_sec = top->samples / top->delay_secs;
	float ksamples_per_sec = top->kernel_samples / top->delay_secs;
	float esamples_percent = (100.0 * top->exact_samples) / top->samples;
	size_t ret = 0;

	if (!perf_guest) {
		ret = SNPRINTF(bf, size,
			       " PerfTop:%8.0f irqs/sec kernel:%4.1f%%"
			       " exact: %4.1f%% [", samples_per_sec,
			       100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
						 samples_per_sec)),
			       esamples_percent);
	} else {
		float us_samples_per_sec = top->us_samples / top->delay_secs;
		float guest_kernel_samples_per_sec = top->guest_kernel_samples / top->delay_secs;
		float guest_us_samples_per_sec = top->guest_us_samples / top->delay_secs;

		ret = SNPRINTF(bf, size,
			       " PerfTop:%8.0f irqs/sec kernel:%4.1f%% us:%4.1f%%"
			       " guest kernel:%4.1f%% guest us:%4.1f%%"
			       " exact: %4.1f%% [", samples_per_sec,
			       100.0 - (100.0 * ((samples_per_sec - ksamples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec - us_samples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec -
						  guest_kernel_samples_per_sec) /
						 samples_per_sec)),
			       100.0 - (100.0 * ((samples_per_sec -
						  guest_us_samples_per_sec) /
						 samples_per_sec)),
			       esamples_percent);
	}

	if (top->evlist->nr_entries == 1 || !top->display_weighted) {
		struct perf_evsel *first;
		first = list_entry(top->evlist->entries.next, struct perf_evsel, node);
		ret += SNPRINTF(bf + ret, size - ret, "%" PRIu64 "%s ",
				(uint64_t)first->attr.sample_period,
				top->freq ? "Hz" : "");
	}

	if (!top->display_weighted) {
		ret += SNPRINTF(bf + ret, size - ret, "%s",
				event_name(top->sym_evsel));
	} else {
		/*
		 * Don't let events eat all the space. Leaving 30 bytes
		 * for the rest should be enough.
		 */
		size_t last_pos = size - 30;

		list_for_each_entry(counter, &top->evlist->entries, node) {
			ret += SNPRINTF(bf + ret, size - ret, "%s%s",
					counter->idx ? "/" : "",
					event_name(counter));
			if (ret > last_pos) {
				sprintf(bf + last_pos - 3, "..");
				ret = last_pos - 1;
				break;
			}
		}
	}

	ret += SNPRINTF(bf + ret, size - ret, "], ");

	if (top->target_pid != -1)
		ret += SNPRINTF(bf + ret, size - ret, " (target_pid: %d",
				top->target_pid);
	else if (top->target_tid != -1)
		ret += SNPRINTF(bf + ret, size - ret, " (target_tid: %d",
				top->target_tid);
	else
		ret += SNPRINTF(bf + ret, size - ret, " (all");

	if (top->cpu_list)
		ret += SNPRINTF(bf + ret, size - ret, ", CPU%s: %s)",
				top->evlist->cpus->nr > 1 ? "s" : "", top->cpu_list);
	else {
		if (top->target_tid != -1)
			ret += SNPRINTF(bf + ret, size - ret, ")");
		else
			ret += SNPRINTF(bf + ret, size - ret, ", %d CPU%s)",
					top->evlist->cpus->nr,
					top->evlist->cpus->nr > 1 ? "s" : "");
	}

	return ret;
}

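/*
 * Typical use (a sketch based on how the builtin-top.c front end drives
 * this helper; the exact buffer size is an assumption):
 *
 *	char bf[160];
 *
 *	perf_top__header_snprintf(top, bf, sizeof(bf));
 *	printf("%s\n", bf);
 */
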
void perf_top__reset_sample_counters(struct perf_top *top)
{
	top->samples = top->us_samples = top->kernel_samples =
	top->exact_samples = top->guest_kernel_samples =
	top->guest_us_samples = 0;
}

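/*
 * Rebuild the rb_tree of active symbols for the next refresh and age the
 * per-counter counts: unless 'zero' is set, each count is scaled by 7/8 on
 * every pass, an exponential decay that lets symbols which stopped sampling
 * fade out of the display instead of lingering at their peak counts.
 */
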
float perf_top__decay_samples(struct perf_top *top, struct rb_root *root)
{
	struct sym_entry *syme, *n;
	float sum_ksamples = 0.0;
	int snap = !top->display_weighted ? top->sym_evsel->idx : 0, j;

	/* Sort the active symbols */
	pthread_mutex_lock(&top->active_symbols_lock);
	syme = list_entry(top->active_symbols.next, struct sym_entry, node);
	pthread_mutex_unlock(&top->active_symbols_lock);

	top->rb_entries = 0;
	list_for_each_entry_safe_from(syme, n, &top->active_symbols, node) {
		syme->snap_count = syme->count[snap];
		if (syme->snap_count != 0) {

			if ((top->hide_user_symbols &&
			     syme->map->dso->kernel == DSO_TYPE_USER) ||
			    (top->hide_kernel_symbols &&
			     syme->map->dso->kernel == DSO_TYPE_KERNEL)) {
				perf_top__remove_active_sym(top, syme);
				continue;
			}
			syme->weight = sym_weight(syme, top);

			if ((int)syme->snap_count >= top->count_filter) {
				rb_insert_active_sym(root, syme);
				++top->rb_entries;
			}
			sum_ksamples += syme->snap_count;

			for (j = 0; j < top->evlist->nr_entries; j++)
				syme->count[j] = top->zero ? 0 : syme->count[j] * 7 / 8;
		} else
			perf_top__remove_active_sym(top, syme);
	}

	return sum_ksamples;
}

/*
 * Find the longest symbol name that will be displayed
 */
void perf_top__find_widths(struct perf_top *top, struct rb_root *root,
			   int *dso_width, int *dso_short_width, int *sym_width)
{
	struct rb_node *nd;
	int printed = 0;

	*sym_width = *dso_width = *dso_short_width = 0;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		struct sym_entry *syme = rb_entry(nd, struct sym_entry, rb_node);
		struct symbol *sym = sym_entry__symbol(syme);

		if (++printed > top->print_entries ||
		    (int)syme->snap_count < top->count_filter)
			continue;

		if (syme->map->dso->long_name_len > *dso_width)
			*dso_width = syme->map->dso->long_name_len;

		if (syme->map->dso->short_name_len > *dso_short_width)
			*dso_short_width = syme->map->dso->short_name_len;

		if (sym->namelen > *sym_width)
			*sym_width = sym->namelen;
	}
}
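
/*
 * The widths computed above are consumed by the display front ends to size
 * the DSO and symbol columns; only entries that will actually be printed
 * (bounded by print_entries and count_filter) are measured.
 */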