record.c 6.0 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288
  1. #include "evlist.h"
  2. #include "evsel.h"
  3. #include "cpumap.h"
  4. #include "parse-events.h"
  5. #include <api/fs/fs.h>
  6. #include "util.h"
  7. #include "cloexec.h"
  8. typedef void (*setup_probe_fn_t)(struct perf_evsel *evsel);
/*
 * perf_do_probe_api - probe whether the kernel accepts an attribute tweak.
 * @fn:  callback that modifies the evsel's perf_event_attr under test
 * @cpu: cpu to open the probe event on
 * @str: base event specification to parse (e.g. "cycles:u")
 *
 * Opens @str once unmodified to validate the base event, applies @fn to
 * the attr, then re-opens to see whether the kernel accepts the change.
 *
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL when the
 * kernel rejects the modified attribute, and -EAGAIN on any other failure
 * (so the caller can retry with a different base event).
 */
static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
{
	struct perf_evlist *evlist;
	struct perf_evsel *evsel;
	unsigned long flags = perf_event_open_cloexec_flag();
	int err = -EAGAIN, fd;
	/* Persists across calls: once EACCES forces per-task mode, stay there. */
	static pid_t pid = -1;

	evlist = perf_evlist__new();
	if (!evlist)
		return -ENOMEM;

	if (parse_events(evlist, str, NULL))
		goto out_delete;

	evsel = perf_evlist__first(evlist);

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
		if (fd < 0) {
			/*
			 * EACCES on a per-cpu (pid == -1) open: presumably a
			 * paranoia restriction — retry as a per-task open.
			 */
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);

	/* Apply the attribute change under test, then try opening again. */
	fn(evsel);

	fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1, flags);
	if (fd < 0) {
		if (errno == EINVAL)
			err = -EINVAL;
		goto out_delete;
	}
	close(fd);
	err = 0;

out_delete:
	perf_evlist__delete(evlist);
	return err;
}
  47. static bool perf_probe_api(setup_probe_fn_t fn)
  48. {
  49. const char *try[] = {"cycles:u", "instructions:u", "cpu-clock:u", NULL};
  50. struct cpu_map *cpus;
  51. int cpu, ret, i = 0;
  52. cpus = cpu_map__new(NULL);
  53. if (!cpus)
  54. return false;
  55. cpu = cpus->map[0];
  56. cpu_map__put(cpus);
  57. do {
  58. ret = perf_do_probe_api(fn, cpu, try[i++]);
  59. if (!ret)
  60. return true;
  61. } while (ret == -EAGAIN && try[i]);
  62. return false;
  63. }
/* Probe callback: request PERF_SAMPLE_IDENTIFIER in the sample type. */
static void perf_probe_sample_identifier(struct perf_evsel *evsel)
{
	evsel->attr.sample_type |= PERF_SAMPLE_IDENTIFIER;
}
/* Probe callback: set the comm_exec attribute bit. */
static void perf_probe_comm_exec(struct perf_evsel *evsel)
{
	evsel->attr.comm_exec = 1;
}
/* Probe callback: set the context_switch attribute bit. */
static void perf_probe_context_switch(struct perf_evsel *evsel)
{
	evsel->attr.context_switch = 1;
}
/* True if the running kernel accepts PERF_SAMPLE_IDENTIFIER. */
bool perf_can_sample_identifier(void)
{
	return perf_probe_api(perf_probe_sample_identifier);
}
/* True if the running kernel accepts the comm_exec attribute bit. */
static bool perf_can_comm_exec(void)
{
	return perf_probe_api(perf_probe_comm_exec);
}
/* True if the running kernel accepts the context_switch attribute bit. */
bool perf_can_record_switch_events(void)
{
	return perf_probe_api(perf_probe_context_switch);
}
  88. bool perf_can_record_cpu_wide(void)
  89. {
  90. struct perf_event_attr attr = {
  91. .type = PERF_TYPE_SOFTWARE,
  92. .config = PERF_COUNT_SW_CPU_CLOCK,
  93. .exclude_kernel = 1,
  94. };
  95. struct cpu_map *cpus;
  96. int cpu, fd;
  97. cpus = cpu_map__new(NULL);
  98. if (!cpus)
  99. return false;
  100. cpu = cpus->map[0];
  101. cpu_map__put(cpus);
  102. fd = sys_perf_event_open(&attr, -1, cpu, -1, 0);
  103. if (fd < 0)
  104. return false;
  105. close(fd);
  106. return true;
  107. }
/*
 * perf_evlist__config - apply record options to every evsel in @evlist.
 * @evlist:    the event list being configured for recording
 * @opts:      record options (grouping, inherit, auxtrace, ...)
 * @callchain: callchain parameters, passed through to per-evsel config
 *
 * Also decides whether sample identifiers are needed to demultiplex
 * samples and, when they are, enables them on every evsel.
 */
void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts,
			 struct callchain_param *callchain)
{
	struct perf_evsel *evsel;
	bool use_sample_identifier = false;
	bool use_comm_exec;

	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	/*
	 * A negative first cpu presumably means no cpu list was given
	 * (per-task monitoring) — TODO confirm against cpu_map__new().
	 */
	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	use_comm_exec = perf_can_comm_exec();

	evlist__for_each_entry(evlist, evsel) {
		perf_evsel__config(evsel, opts, callchain);
		/* Only enable comm_exec on tracking evsels, when supported. */
		if (evsel->tracking && use_comm_exec)
			evsel->attr.comm_exec = 1;
	}

	if (opts->full_auxtrace) {
		/*
		 * Need to be able to synthesize and parse selected events with
		 * arbitrary sample types, which requires always being able to
		 * match the id.
		 */
		use_sample_identifier = perf_can_sample_identifier();
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	} else if (evlist->nr_entries > 1) {
		struct perf_evsel *first = perf_evlist__first(evlist);

		/* Mixed sample_types need an identifier to tell samples apart. */
		evlist__for_each_entry(evlist, evsel) {
			if (evsel->attr.sample_type == first->attr.sample_type)
				continue;
			use_sample_identifier = perf_can_sample_identifier();
			break;
		}
		evlist__for_each_entry(evlist, evsel)
			perf_evsel__set_sample_id(evsel, use_sample_identifier);
	}

	perf_evlist__set_id_pos(evlist);
}
/*
 * Read the kernel's current maximum sample rate into *rate.
 * Returns 0 on success; nonzero when the sysctl cannot be read
 * (callers treat any failure as "no limit known").
 */
static int get_max_rate(unsigned int *rate)
{
	return sysctl__read_int("kernel/perf_event_max_sample_rate", (int *)rate);
}
  154. static int record_opts__config_freq(struct record_opts *opts)
  155. {
  156. bool user_freq = opts->user_freq != UINT_MAX;
  157. unsigned int max_rate;
  158. if (opts->user_interval != ULLONG_MAX)
  159. opts->default_interval = opts->user_interval;
  160. if (user_freq)
  161. opts->freq = opts->user_freq;
  162. /*
  163. * User specified count overrides default frequency.
  164. */
  165. if (opts->default_interval)
  166. opts->freq = 0;
  167. else if (opts->freq) {
  168. opts->default_interval = opts->freq;
  169. } else {
  170. pr_err("frequency and count are zero, aborting\n");
  171. return -1;
  172. }
  173. if (get_max_rate(&max_rate))
  174. return 0;
  175. /*
  176. * User specified frequency is over current maximum.
  177. */
  178. if (user_freq && (max_rate < opts->freq)) {
  179. pr_err("Maximum frequency rate (%u) reached.\n"
  180. "Please use -F freq option with lower value or consider\n"
  181. "tweaking /proc/sys/kernel/perf_event_max_sample_rate.\n",
  182. max_rate);
  183. return -1;
  184. }
  185. /*
  186. * Default frequency is over current maximum.
  187. */
  188. if (max_rate < opts->freq) {
  189. pr_warning("Lowering default frequency rate to %u.\n"
  190. "Please consider tweaking "
  191. "/proc/sys/kernel/perf_event_max_sample_rate.\n",
  192. max_rate);
  193. opts->freq = max_rate;
  194. }
  195. return 0;
  196. }
/*
 * Validate and finalize record options.
 * Currently only the frequency/period setup needs work.
 * Returns 0 on success, -1 on error.
 */
int record_opts__config(struct record_opts *opts)
{
	return record_opts__config_freq(opts);
}
/*
 * perf_evlist__can_select_event - check that @str parses and can be opened.
 * @evlist: existing evlist whose cpu map (when non-empty) picks the probe cpu;
 *          may be NULL
 * @str:    event specification to test
 *
 * Parses @str into a throwaway evlist and tries to actually open a perf
 * event fd for it on one cpu.  Returns true only when both steps succeed.
 */
bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
{
	struct perf_evlist *temp_evlist;
	struct perf_evsel *evsel;
	int err, fd, cpu;
	bool ret = false;
	pid_t pid = -1;

	temp_evlist = perf_evlist__new();
	if (!temp_evlist)
		return false;

	err = parse_events(temp_evlist, str, NULL);
	if (err)
		goto out_delete;

	/* parse_events() appends, so the event under test is the last one. */
	evsel = perf_evlist__last(temp_evlist);

	if (!evlist || cpu_map__empty(evlist->cpus)) {
		/* No usable cpu map from the caller: probe the first online cpu. */
		struct cpu_map *cpus = cpu_map__new(NULL);

		cpu = cpus ? cpus->map[0] : 0;
		cpu_map__put(cpus);
	} else {
		cpu = evlist->cpus->map[0];
	}

	while (1) {
		fd = sys_perf_event_open(&evsel->attr, pid, cpu, -1,
					 perf_event_open_cloexec_flag());
		if (fd < 0) {
			/* EACCES on a per-cpu open: retry as a per-task open. */
			if (pid == -1 && errno == EACCES) {
				pid = 0;
				continue;
			}
			goto out_delete;
		}
		break;
	}
	close(fd);
	ret = true;

out_delete:
	perf_evlist__delete(temp_evlist);
	return ret;
}