intel-bts.c 12 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463
  1. /*
* intel-bts.c: Intel Branch Trace Store support
  3. * Copyright (c) 2013-2015, Intel Corporation.
  4. *
  5. * This program is free software; you can redistribute it and/or modify it
  6. * under the terms and conditions of the GNU General Public License,
  7. * version 2, as published by the Free Software Foundation.
  8. *
  9. * This program is distributed in the hope it will be useful, but WITHOUT
  10. * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11. * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
  12. * more details.
  13. *
  14. */
  15. #include <errno.h>
  16. #include <linux/kernel.h>
  17. #include <linux/types.h>
  18. #include <linux/bitops.h>
  19. #include <linux/log2.h>
  20. #include "../../util/cpumap.h"
  21. #include "../../util/evsel.h"
  22. #include "../../util/evlist.h"
  23. #include "../../util/session.h"
  24. #include "../../util/util.h"
  25. #include "../../util/pmu.h"
  26. #include "../../util/debug.h"
  27. #include "../../util/tsc.h"
  28. #include "../../util/auxtrace.h"
  29. #include "../../util/intel-bts.h"
  30. #define KiB(x) ((x) * 1024)
  31. #define MiB(x) ((x) * 1024 * 1024)
  32. #define KiB_MASK(x) (KiB(x) - 1)
  33. #define MiB_MASK(x) (MiB(x) - 1)
/*
 * Per-AUX-mmap state kept while recording in snapshot mode, indexed by
 * mmap index (see intel_bts_find_snapshot()).
 */
struct intel_bts_snapshot_ref {
	void *ref_buf;		/* reference data buffer; released in intel_bts_free_snapshot_refs() */
	size_t ref_offset;	/* offset of the reference data (not used in the code visible here) */
	bool wrapped;		/* sticky flag: buffer has wrapped at least once */
};
/*
 * Recording context for the Intel BTS PMU.  The generic auxtrace_record
 * is embedded first; callbacks recover this struct with container_of().
 */
struct intel_bts_recording {
	struct auxtrace_record		itr;		/* generic auxtrace callbacks */
	struct perf_pmu			*intel_bts_pmu;	/* the intel_bts PMU, looked up at init */
	struct perf_evlist		*evlist;	/* evlist being recorded */
	bool				snapshot_mode;	/* copied from opts->auxtrace_snapshot_mode */
	size_t				snapshot_size;	/* requested snapshot size in bytes (0 = default) */
	int				snapshot_ref_cnt;	/* allocated entries in snapshot_refs */
	struct intel_bts_snapshot_ref	*snapshot_refs;	/* per-mmap snapshot state, indexed by mmap idx */
};
/*
 * One 24-byte BTS branch record.  Used here only for its size, as the
 * AUX data alignment (see intel_bts_recording_init()).
 */
struct branch {
	u64 from;
	u64 to;
	u64 misc;
};
/*
 * Size of the private data that intel_bts_info_fill() writes into the
 * auxtrace info event.
 */
static size_t
intel_bts_info_priv_size(struct auxtrace_record *itr __maybe_unused,
			 struct perf_evlist *evlist __maybe_unused)
{
	return INTEL_BTS_AUXTRACE_PRIV_SIZE;
}
/*
 * Fill in the auxtrace info event that describes this BTS session:
 * PMU type, TSC-to-perf-time conversion parameters and snapshot mode.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_bts_info_fill(struct auxtrace_record *itr,
			       struct perf_session *session,
			       struct auxtrace_info_event *auxtrace_info,
			       size_t priv_size)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);
	struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
	struct perf_event_mmap_page *pc;
	struct perf_tsc_conversion tc = { .time_mult = 0, };
	bool cap_user_time_zero = false;
	int err;

	if (priv_size != INTEL_BTS_AUXTRACE_PRIV_SIZE)
		return -EINVAL;

	if (!session->evlist->nr_mmaps)
		return -EINVAL;

	/* Read the TSC conversion parameters from the first mmap'd page. */
	pc = session->evlist->mmap[0].base;
	if (pc) {
		err = perf_read_tsc_conversion(pc, &tc);
		if (err) {
			/* -EOPNOTSUPP just means no TSC conversion; carry on without it */
			if (err != -EOPNOTSUPP)
				return err;
		} else {
			/* a non-zero time_mult indicates usable conversion parameters */
			cap_user_time_zero = tc.time_mult != 0;
		}
		if (!cap_user_time_zero)
			ui__warning("Intel BTS: TSC not available\n");
	}

	auxtrace_info->type = PERF_AUXTRACE_INTEL_BTS;
	auxtrace_info->priv[INTEL_BTS_PMU_TYPE] = intel_bts_pmu->type;
	auxtrace_info->priv[INTEL_BTS_TIME_SHIFT] = tc.time_shift;
	auxtrace_info->priv[INTEL_BTS_TIME_MULT] = tc.time_mult;
	auxtrace_info->priv[INTEL_BTS_TIME_ZERO] = tc.time_zero;
	auxtrace_info->priv[INTEL_BTS_CAP_USER_TIME_ZERO] = cap_user_time_zero;
	auxtrace_info->priv[INTEL_BTS_SNAPSHOT_MODE] = btsr->snapshot_mode;

	return 0;
}
/*
 * Validate and finalize record options for an Intel BTS session:
 * enforce a single BTS event, reject per-cpu recording, pick default
 * snapshot/mmap sizes, move the BTS event to the front of the evlist
 * and add a dummy tracking event.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_bts_recording_options(struct auxtrace_record *itr,
				       struct perf_evlist *evlist,
				       struct record_opts *opts)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);
	struct perf_pmu *intel_bts_pmu = btsr->intel_bts_pmu;
	struct perf_evsel *evsel, *intel_bts_evsel = NULL;
	const struct cpu_map *cpus = evlist->cpus;
	bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;

	btsr->evlist = evlist;
	btsr->snapshot_mode = opts->auxtrace_snapshot_mode;

	/* Find the (single) BTS event; more than one is an error. */
	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == intel_bts_pmu->type) {
			if (intel_bts_evsel) {
				pr_err("There may be only one " INTEL_BTS_PMU_NAME " event\n");
				return -EINVAL;
			}
			/* counting mode with period 1, not frequency-based sampling */
			evsel->attr.freq = 0;
			evsel->attr.sample_period = 1;
			intel_bts_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
		pr_err("Snapshot mode (-S option) requires " INTEL_BTS_PMU_NAME " PMU event (-e " INTEL_BTS_PMU_NAME ")\n");
		return -EINVAL;
	}

	if (!opts->full_auxtrace)
		return 0;

	if (opts->full_auxtrace && !cpu_map__empty(cpus)) {
		pr_err(INTEL_BTS_PMU_NAME " does not support per-cpu recording\n");
		return -EINVAL;
	}

	/* Set default sizes for snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				/* unprivileged users get smaller buffers */
				opts->auxtrace_mmap_pages = KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}
		/* Derive whichever of snapshot size / mmap pages was not given. */
		if (!opts->auxtrace_snapshot_size)
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			/* mmap size must be a power-of-2 number of pages */
			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
		pr_debug2("Intel BTS snapshot size: %zu\n",
			  opts->auxtrace_snapshot_size);
	}

	/* Set default sizes for full trace mode */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz;

		if (opts->auxtrace_snapshot_mode)
			min_sz = KiB(4);
		else
			min_sz = KiB(8);
		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for Intel BTS: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	if (intel_bts_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace event
		 * must come first.
		 */
		perf_evlist__to_front(evlist, intel_bts_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(intel_bts_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct perf_evsel *tracking_evsel;
		int err;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = perf_evlist__last(evlist);
		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		/* counting mode with period 1, matching the BTS event above */
		tracking_evsel->attr.freq = 0;
		tracking_evsel->attr.sample_period = 1;
	}

	return 0;
}
  217. static int intel_bts_parse_snapshot_options(struct auxtrace_record *itr,
  218. struct record_opts *opts,
  219. const char *str)
  220. {
  221. struct intel_bts_recording *btsr =
  222. container_of(itr, struct intel_bts_recording, itr);
  223. unsigned long long snapshot_size = 0;
  224. char *endptr;
  225. if (str) {
  226. snapshot_size = strtoull(str, &endptr, 0);
  227. if (*endptr || snapshot_size > SIZE_MAX)
  228. return -1;
  229. }
  230. opts->auxtrace_snapshot_mode = true;
  231. opts->auxtrace_snapshot_size = snapshot_size;
  232. btsr->snapshot_size = snapshot_size;
  233. return 0;
  234. }
/* 'reference' callback: the current value of the CPU timestamp counter. */
static u64 intel_bts_reference(struct auxtrace_record *itr __maybe_unused)
{
	return rdtsc();
}
  239. static int intel_bts_alloc_snapshot_refs(struct intel_bts_recording *btsr,
  240. int idx)
  241. {
  242. const size_t sz = sizeof(struct intel_bts_snapshot_ref);
  243. int cnt = btsr->snapshot_ref_cnt, new_cnt = cnt * 2;
  244. struct intel_bts_snapshot_ref *refs;
  245. if (!new_cnt)
  246. new_cnt = 16;
  247. while (new_cnt <= idx)
  248. new_cnt *= 2;
  249. refs = calloc(new_cnt, sz);
  250. if (!refs)
  251. return -ENOMEM;
  252. memcpy(refs, btsr->snapshot_refs, cnt * sz);
  253. btsr->snapshot_refs = refs;
  254. btsr->snapshot_ref_cnt = new_cnt;
  255. return 0;
  256. }
  257. static void intel_bts_free_snapshot_refs(struct intel_bts_recording *btsr)
  258. {
  259. int i;
  260. for (i = 0; i < btsr->snapshot_ref_cnt; i++)
  261. zfree(&btsr->snapshot_refs[i].ref_buf);
  262. zfree(&btsr->snapshot_refs);
  263. }
/* auxtrace_record 'free' callback: release the whole recording context. */
static void intel_bts_recording_free(struct auxtrace_record *itr)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);

	intel_bts_free_snapshot_refs(btsr);
	free(btsr);
}
  271. static int intel_bts_snapshot_start(struct auxtrace_record *itr)
  272. {
  273. struct intel_bts_recording *btsr =
  274. container_of(itr, struct intel_bts_recording, itr);
  275. struct perf_evsel *evsel;
  276. evlist__for_each_entry(btsr->evlist, evsel) {
  277. if (evsel->attr.type == btsr->intel_bts_pmu->type)
  278. return perf_evsel__disable(evsel);
  279. }
  280. return -EINVAL;
  281. }
  282. static int intel_bts_snapshot_finish(struct auxtrace_record *itr)
  283. {
  284. struct intel_bts_recording *btsr =
  285. container_of(itr, struct intel_bts_recording, itr);
  286. struct perf_evsel *evsel;
  287. evlist__for_each_entry(btsr->evlist, evsel) {
  288. if (evsel->attr.type == btsr->intel_bts_pmu->type)
  289. return perf_evsel__enable(evsel);
  290. }
  291. return -EINVAL;
  292. }
  293. static bool intel_bts_first_wrap(u64 *data, size_t buf_size)
  294. {
  295. int i, a, b;
  296. b = buf_size >> 3;
  297. a = b - 512;
  298. if (a < 0)
  299. a = 0;
  300. for (i = a; i < b; i++) {
  301. if (data[i])
  302. return true;
  303. }
  304. return false;
  305. }
/*
 * Prepare a snapshot of the AUX buffer for mmap index @idx: record
 * whether the buffer has wrapped and adjust *old/*head so the generic
 * copy-out code (which expects full-trace semantics) works on the
 * snapshot-mode buffer.
 *
 * Returns 0 on success or a negative error code.
 */
static int intel_bts_find_snapshot(struct auxtrace_record *itr, int idx,
				   struct auxtrace_mmap *mm, unsigned char *data,
				   u64 *head, u64 *old)
{
	struct intel_bts_recording *btsr =
			container_of(itr, struct intel_bts_recording, itr);
	bool wrapped;
	int err;

	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head);

	/* Grow the per-mmap state array the first time this index is seen. */
	if (idx >= btsr->snapshot_ref_cnt) {
		err = intel_bts_alloc_snapshot_refs(btsr, idx);
		if (err)
			goto out_err;
	}

	/* The wrapped flag is sticky: once set it stays set for this mmap. */
	wrapped = btsr->snapshot_refs[idx].wrapped;
	if (!wrapped && intel_bts_first_wrap((u64 *)data, mm->len)) {
		btsr->snapshot_refs[idx].wrapped = true;
		wrapped = true;
	}

	/*
	 * In full trace mode 'head' continually increases.  However in snapshot
	 * mode 'head' is an offset within the buffer.  Here 'old' and 'head'
	 * are adjusted to match the full trace case which expects that 'old' is
	 * always less than 'head'.
	 */
	if (wrapped) {
		/* wrapped: the whole buffer (mm->len bytes) is valid data */
		*old = *head;
		*head += mm->len;
	} else {
		/* not wrapped: reduce 'old' to a buffer offset first */
		if (mm->mask)
			*old &= mm->mask;
		else
			*old %= mm->len;
		if (*old > *head)
			*head += mm->len;
	}

	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

	return 0;

out_err:
	pr_err("%s: failed, error %d\n", __func__, err);
	return err;
}
  350. static int intel_bts_read_finish(struct auxtrace_record *itr, int idx)
  351. {
  352. struct intel_bts_recording *btsr =
  353. container_of(itr, struct intel_bts_recording, itr);
  354. struct perf_evsel *evsel;
  355. evlist__for_each_entry(btsr->evlist, evsel) {
  356. if (evsel->attr.type == btsr->intel_bts_pmu->type)
  357. return perf_evlist__enable_event_idx(btsr->evlist,
  358. evsel, idx);
  359. }
  360. return -EINVAL;
  361. }
/*
 * Create the Intel BTS recording context for 'perf record'.
 *
 * Returns the embedded auxtrace_record on success.  Returns NULL when
 * the intel_bts PMU does not exist (*err is left untouched in that
 * case) or on failure (*err is set to a negative error code).
 */
struct auxtrace_record *intel_bts_recording_init(int *err)
{
	struct perf_pmu *intel_bts_pmu = perf_pmu__find(INTEL_BTS_PMU_NAME);
	struct intel_bts_recording *btsr;

	if (!intel_bts_pmu)
		return NULL;

	/* tell jitdump to use architecture (TSC) timestamps */
	if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
		*err = -errno;
		return NULL;
	}

	btsr = zalloc(sizeof(struct intel_bts_recording));
	if (!btsr) {
		*err = -ENOMEM;
		return NULL;
	}

	btsr->intel_bts_pmu = intel_bts_pmu;
	btsr->itr.recording_options = intel_bts_recording_options;
	btsr->itr.info_priv_size = intel_bts_info_priv_size;
	btsr->itr.info_fill = intel_bts_info_fill;
	btsr->itr.free = intel_bts_recording_free;
	btsr->itr.snapshot_start = intel_bts_snapshot_start;
	btsr->itr.snapshot_finish = intel_bts_snapshot_finish;
	btsr->itr.find_snapshot = intel_bts_find_snapshot;
	btsr->itr.parse_snapshot_options = intel_bts_parse_snapshot_options;
	btsr->itr.reference = intel_bts_reference;
	btsr->itr.read_finish = intel_bts_read_finish;
	/* AUX data is a stream of fixed-size branch records */
	btsr->itr.alignment = sizeof(struct branch);

	return &btsr->itr;
}