/* data-convert-bt.c */
  1. /*
  2. * CTF writing support via babeltrace.
  3. *
  4. * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
  5. * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
  6. *
  7. * Released under the GPL v2. (and only v2, not any later version)
  8. */
  9. #include <errno.h>
  10. #include <inttypes.h>
  11. #include <linux/compiler.h>
  12. #include <linux/kernel.h>
  13. #include <babeltrace/ctf-writer/writer.h>
  14. #include <babeltrace/ctf-writer/clock.h>
  15. #include <babeltrace/ctf-writer/stream.h>
  16. #include <babeltrace/ctf-writer/event.h>
  17. #include <babeltrace/ctf-writer/event-types.h>
  18. #include <babeltrace/ctf-writer/event-fields.h>
  19. #include <babeltrace/ctf-ir/utils.h>
  20. #include <babeltrace/ctf/events.h>
  21. #include <traceevent/event-parse.h>
  22. #include "asm/bug.h"
  23. #include "data-convert-bt.h"
  24. #include "session.h"
  25. #include "util.h"
  26. #include "debug.h"
  27. #include "tool.h"
  28. #include "evlist.h"
  29. #include "evsel.h"
  30. #include "machine.h"
  31. #include "config.h"
  32. #include "sane_ctype.h"
/*
 * Debug printout helpers, gated on the 'data-convert' debug variable:
 * pr()/pr2() print at verbosity 1/2, pr_time2() additionally prefixes
 * the sample timestamp.
 */
#define pr_N(n, fmt, ...) \
	eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)
/* Per-evsel private data: the CTF event class created for that evsel. */
struct evsel_priv {
	struct bt_ctf_event_class *event_class;
};
/* Upper bound on the number of per-CPU streams. */
#define MAX_CPUS	4096

/* One CTF output stream, bound to a single CPU. */
struct ctf_stream {
	struct bt_ctf_stream	*stream;
	int			cpu;	/* CPU this stream carries events for */
	u32			count;	/* events appended since the last flush */
};
  47. struct ctf_writer {
  48. /* writer primitives */
  49. struct bt_ctf_writer *writer;
  50. struct ctf_stream **stream;
  51. int stream_cnt;
  52. struct bt_ctf_stream_class *stream_class;
  53. struct bt_ctf_clock *clock;
  54. /* data types */
  55. union {
  56. struct {
  57. struct bt_ctf_field_type *s64;
  58. struct bt_ctf_field_type *u64;
  59. struct bt_ctf_field_type *s32;
  60. struct bt_ctf_field_type *u32;
  61. struct bt_ctf_field_type *string;
  62. struct bt_ctf_field_type *u32_hex;
  63. struct bt_ctf_field_type *u64_hex;
  64. };
  65. struct bt_ctf_field_type *array[6];
  66. } data;
  67. struct bt_ctf_event_class *comm_class;
  68. struct bt_ctf_event_class *exit_class;
  69. struct bt_ctf_event_class *fork_class;
  70. struct bt_ctf_event_class *mmap_class;
  71. struct bt_ctf_event_class *mmap2_class;
  72. };
/*
 * Top-level conversion context; the perf_tool callbacks recover it via
 * container_of(tool, struct convert, tool).
 */
struct convert {
	struct perf_tool	tool;
	struct ctf_writer	writer;

	u64			events_size;		/* total bytes of converted events */
	u64			events_count;		/* number of sample events */
	u64			non_sample_count;	/* number of converted non-sample events */

	/* Ordered events configured queue size. */
	u64			queue_size;
};
  82. static int value_set(struct bt_ctf_field_type *type,
  83. struct bt_ctf_event *event,
  84. const char *name, u64 val)
  85. {
  86. struct bt_ctf_field *field;
  87. bool sign = bt_ctf_field_type_integer_get_signed(type);
  88. int ret;
  89. field = bt_ctf_field_create(type);
  90. if (!field) {
  91. pr_err("failed to create a field %s\n", name);
  92. return -1;
  93. }
  94. if (sign) {
  95. ret = bt_ctf_field_signed_integer_set_value(field, val);
  96. if (ret) {
  97. pr_err("failed to set field value %s\n", name);
  98. goto err;
  99. }
  100. } else {
  101. ret = bt_ctf_field_unsigned_integer_set_value(field, val);
  102. if (ret) {
  103. pr_err("failed to set field value %s\n", name);
  104. goto err;
  105. }
  106. }
  107. ret = bt_ctf_event_set_payload(event, name, field);
  108. if (ret) {
  109. pr_err("failed to set payload %s\n", name);
  110. goto err;
  111. }
  112. pr2(" SET [%s = %" PRIu64 "]\n", name, val);
  113. err:
  114. bt_ctf_field_put(field);
  115. return ret;
  116. }
/*
 * Generate value_set_<type>() helpers that pick the matching CTF field
 * type out of cw->data and forward to value_set().  _val_type is the C
 * type of the value parameter; the hex variant reuses u64.
 */
#define __FUNC_VALUE_SET(_name, _val_type)				\
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,	\
			     struct bt_ctf_event *event,		\
			     const char *name,				\
			     _val_type val)				\
{									\
	struct bt_ctf_field_type *type = cw->data._name;		\
	return value_set(type, event, name, (u64) val);			\
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)
  132. static int string_set_value(struct bt_ctf_field *field, const char *string);
  133. static __maybe_unused int
  134. value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
  135. const char *name, const char *string)
  136. {
  137. struct bt_ctf_field_type *type = cw->data.string;
  138. struct bt_ctf_field *field;
  139. int ret = 0;
  140. field = bt_ctf_field_create(type);
  141. if (!field) {
  142. pr_err("failed to create a field %s\n", name);
  143. return -1;
  144. }
  145. ret = string_set_value(field, string);
  146. if (ret) {
  147. pr_err("failed to set value %s\n", name);
  148. goto err_put_field;
  149. }
  150. ret = bt_ctf_event_set_payload(event, name, field);
  151. if (ret)
  152. pr_err("failed to set payload %s\n", name);
  153. err_put_field:
  154. bt_ctf_field_put(field);
  155. return ret;
  156. }
  157. static struct bt_ctf_field_type*
  158. get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
  159. {
  160. unsigned long flags = field->flags;
  161. if (flags & FIELD_IS_STRING)
  162. return cw->data.string;
  163. if (!(flags & FIELD_IS_SIGNED)) {
  164. /* unsigned long are mostly pointers */
  165. if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
  166. return cw->data.u64_hex;
  167. }
  168. if (flags & FIELD_IS_SIGNED) {
  169. if (field->size == 8)
  170. return cw->data.s64;
  171. else
  172. return cw->data.s32;
  173. }
  174. if (field->size == 8)
  175. return cw->data.u64;
  176. else
  177. return cw->data.u32;
  178. }
  179. static unsigned long long adjust_signedness(unsigned long long value_int, int size)
  180. {
  181. unsigned long long value_mask;
  182. /*
  183. * value_mask = (1 << (size * 8 - 1)) - 1.
  184. * Directly set value_mask for code readers.
  185. */
  186. switch (size) {
  187. case 1:
  188. value_mask = 0x7fULL;
  189. break;
  190. case 2:
  191. value_mask = 0x7fffULL;
  192. break;
  193. case 4:
  194. value_mask = 0x7fffffffULL;
  195. break;
  196. case 8:
  197. /*
  198. * For 64 bit value, return it self. There is no need
  199. * to fill high bit.
  200. */
  201. /* Fall through */
  202. default:
  203. /* BUG! */
  204. return value_int;
  205. }
  206. /* If it is a positive value, don't adjust. */
  207. if ((value_int & (~0ULL - value_mask)) == 0)
  208. return value_int;
  209. /* Fill upper part of value_int with 1 to make it a negative long long. */
  210. return (value_int & value_mask) | ~value_mask;
  211. }
/*
 * Store @string into a CTF string @field, escaping every unprintable
 * byte as "\xXX".  A fully printable string is stored directly with no
 * allocation; otherwise an escaped copy is built on the heap first.
 * Returns the result of bt_ctf_field_string_set_value().
 */
static int string_set_value(struct bt_ctf_field *field, const char *string)
{
	char *buffer = NULL;
	/* 'i' indexes the input, 'p' the (possibly longer) escaped output. */
	size_t len = strlen(string), i, p;
	int err;

	for (i = p = 0; i < len; i++, p++) {
		if (isprint(string[i])) {
			if (!buffer)
				continue;	/* no escaping needed so far */
			buffer[p] = string[i];
		} else {
			char numstr[5];

			/* 4-char escape, e.g. "\x1b", plus NUL. */
			snprintf(numstr, sizeof(numstr), "\\x%02x",
				 (unsigned int)(string[i]) & 0xff);

			if (!buffer) {
				/*
				 * First unprintable byte: allocate worst-case
				 * room (4 output bytes per remaining input
				 * byte, plus terminator) and copy over the
				 * printable prefix already scanned.
				 */
				buffer = zalloc(i + (len - i) * 4 + 2);
				if (!buffer) {
					pr_err("failed to set unprintable string '%s'\n", string);
					return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
				}
				if (i > 0)
					strncpy(buffer, string, i);
			}
			/* Append the escape (4 chars); loop's p++ adds one. */
			memcpy(buffer + p, numstr, 4);
			p += 3;
		}
	}

	if (!buffer)
		return bt_ctf_field_string_set_value(field, string);
	err = bt_ctf_field_string_set_value(field, buffer);
	free(buffer);
	return err;
}
  245. static int add_tracepoint_field_value(struct ctf_writer *cw,
  246. struct bt_ctf_event_class *event_class,
  247. struct bt_ctf_event *event,
  248. struct perf_sample *sample,
  249. struct format_field *fmtf)
  250. {
  251. struct bt_ctf_field_type *type;
  252. struct bt_ctf_field *array_field;
  253. struct bt_ctf_field *field;
  254. const char *name = fmtf->name;
  255. void *data = sample->raw_data;
  256. unsigned long flags = fmtf->flags;
  257. unsigned int n_items;
  258. unsigned int i;
  259. unsigned int offset;
  260. unsigned int len;
  261. int ret;
  262. name = fmtf->alias;
  263. offset = fmtf->offset;
  264. len = fmtf->size;
  265. if (flags & FIELD_IS_STRING)
  266. flags &= ~FIELD_IS_ARRAY;
  267. if (flags & FIELD_IS_DYNAMIC) {
  268. unsigned long long tmp_val;
  269. tmp_val = tep_read_number(fmtf->event->pevent,
  270. data + offset, len);
  271. offset = tmp_val;
  272. len = offset >> 16;
  273. offset &= 0xffff;
  274. }
  275. if (flags & FIELD_IS_ARRAY) {
  276. type = bt_ctf_event_class_get_field_by_name(
  277. event_class, name);
  278. array_field = bt_ctf_field_create(type);
  279. bt_ctf_field_type_put(type);
  280. if (!array_field) {
  281. pr_err("Failed to create array type %s\n", name);
  282. return -1;
  283. }
  284. len = fmtf->size / fmtf->arraylen;
  285. n_items = fmtf->arraylen;
  286. } else {
  287. n_items = 1;
  288. array_field = NULL;
  289. }
  290. type = get_tracepoint_field_type(cw, fmtf);
  291. for (i = 0; i < n_items; i++) {
  292. if (flags & FIELD_IS_ARRAY)
  293. field = bt_ctf_field_array_get_field(array_field, i);
  294. else
  295. field = bt_ctf_field_create(type);
  296. if (!field) {
  297. pr_err("failed to create a field %s\n", name);
  298. return -1;
  299. }
  300. if (flags & FIELD_IS_STRING)
  301. ret = string_set_value(field, data + offset + i * len);
  302. else {
  303. unsigned long long value_int;
  304. value_int = tep_read_number(
  305. fmtf->event->pevent,
  306. data + offset + i * len, len);
  307. if (!(flags & FIELD_IS_SIGNED))
  308. ret = bt_ctf_field_unsigned_integer_set_value(
  309. field, value_int);
  310. else
  311. ret = bt_ctf_field_signed_integer_set_value(
  312. field, adjust_signedness(value_int, len));
  313. }
  314. if (ret) {
  315. pr_err("failed to set file value %s\n", name);
  316. goto err_put_field;
  317. }
  318. if (!(flags & FIELD_IS_ARRAY)) {
  319. ret = bt_ctf_event_set_payload(event, name, field);
  320. if (ret) {
  321. pr_err("failed to set payload %s\n", name);
  322. goto err_put_field;
  323. }
  324. }
  325. bt_ctf_field_put(field);
  326. }
  327. if (flags & FIELD_IS_ARRAY) {
  328. ret = bt_ctf_event_set_payload(event, name, array_field);
  329. if (ret) {
  330. pr_err("Failed add payload array %s\n", name);
  331. return -1;
  332. }
  333. bt_ctf_field_put(array_field);
  334. }
  335. return 0;
  336. err_put_field:
  337. bt_ctf_field_put(field);
  338. return -1;
  339. }
  340. static int add_tracepoint_fields_values(struct ctf_writer *cw,
  341. struct bt_ctf_event_class *event_class,
  342. struct bt_ctf_event *event,
  343. struct format_field *fields,
  344. struct perf_sample *sample)
  345. {
  346. struct format_field *field;
  347. int ret;
  348. for (field = fields; field; field = field->next) {
  349. ret = add_tracepoint_field_value(cw, event_class, event, sample,
  350. field);
  351. if (ret)
  352. return -1;
  353. }
  354. return 0;
  355. }
  356. static int add_tracepoint_values(struct ctf_writer *cw,
  357. struct bt_ctf_event_class *event_class,
  358. struct bt_ctf_event *event,
  359. struct perf_evsel *evsel,
  360. struct perf_sample *sample)
  361. {
  362. struct format_field *common_fields = evsel->tp_format->format.common_fields;
  363. struct format_field *fields = evsel->tp_format->format.fields;
  364. int ret;
  365. ret = add_tracepoint_fields_values(cw, event_class, event,
  366. common_fields, sample);
  367. if (!ret)
  368. ret = add_tracepoint_fields_values(cw, event_class, event,
  369. fields, sample);
  370. return ret;
  371. }
  372. static int
  373. add_bpf_output_values(struct bt_ctf_event_class *event_class,
  374. struct bt_ctf_event *event,
  375. struct perf_sample *sample)
  376. {
  377. struct bt_ctf_field_type *len_type, *seq_type;
  378. struct bt_ctf_field *len_field, *seq_field;
  379. unsigned int raw_size = sample->raw_size;
  380. unsigned int nr_elements = raw_size / sizeof(u32);
  381. unsigned int i;
  382. int ret;
  383. if (nr_elements * sizeof(u32) != raw_size)
  384. pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
  385. raw_size, nr_elements * sizeof(u32) - raw_size);
  386. len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
  387. len_field = bt_ctf_field_create(len_type);
  388. if (!len_field) {
  389. pr_err("failed to create 'raw_len' for bpf output event\n");
  390. ret = -1;
  391. goto put_len_type;
  392. }
  393. ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
  394. if (ret) {
  395. pr_err("failed to set field value for raw_len\n");
  396. goto put_len_field;
  397. }
  398. ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
  399. if (ret) {
  400. pr_err("failed to set payload to raw_len\n");
  401. goto put_len_field;
  402. }
  403. seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
  404. seq_field = bt_ctf_field_create(seq_type);
  405. if (!seq_field) {
  406. pr_err("failed to create 'raw_data' for bpf output event\n");
  407. ret = -1;
  408. goto put_seq_type;
  409. }
  410. ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
  411. if (ret) {
  412. pr_err("failed to set length of 'raw_data'\n");
  413. goto put_seq_field;
  414. }
  415. for (i = 0; i < nr_elements; i++) {
  416. struct bt_ctf_field *elem_field =
  417. bt_ctf_field_sequence_get_field(seq_field, i);
  418. ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
  419. ((u32 *)(sample->raw_data))[i]);
  420. bt_ctf_field_put(elem_field);
  421. if (ret) {
  422. pr_err("failed to set raw_data[%d]\n", i);
  423. goto put_seq_field;
  424. }
  425. }
  426. ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
  427. if (ret)
  428. pr_err("failed to set payload for raw_data\n");
  429. put_seq_field:
  430. bt_ctf_field_put(seq_field);
  431. put_seq_type:
  432. bt_ctf_field_type_put(seq_type);
  433. put_len_field:
  434. bt_ctf_field_put(len_field);
  435. put_len_type:
  436. bt_ctf_field_type_put(len_type);
  437. return ret;
  438. }
  439. static int
  440. add_callchain_output_values(struct bt_ctf_event_class *event_class,
  441. struct bt_ctf_event *event,
  442. struct ip_callchain *callchain)
  443. {
  444. struct bt_ctf_field_type *len_type, *seq_type;
  445. struct bt_ctf_field *len_field, *seq_field;
  446. unsigned int nr_elements = callchain->nr;
  447. unsigned int i;
  448. int ret;
  449. len_type = bt_ctf_event_class_get_field_by_name(
  450. event_class, "perf_callchain_size");
  451. len_field = bt_ctf_field_create(len_type);
  452. if (!len_field) {
  453. pr_err("failed to create 'perf_callchain_size' for callchain output event\n");
  454. ret = -1;
  455. goto put_len_type;
  456. }
  457. ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
  458. if (ret) {
  459. pr_err("failed to set field value for perf_callchain_size\n");
  460. goto put_len_field;
  461. }
  462. ret = bt_ctf_event_set_payload(event, "perf_callchain_size", len_field);
  463. if (ret) {
  464. pr_err("failed to set payload to perf_callchain_size\n");
  465. goto put_len_field;
  466. }
  467. seq_type = bt_ctf_event_class_get_field_by_name(
  468. event_class, "perf_callchain");
  469. seq_field = bt_ctf_field_create(seq_type);
  470. if (!seq_field) {
  471. pr_err("failed to create 'perf_callchain' for callchain output event\n");
  472. ret = -1;
  473. goto put_seq_type;
  474. }
  475. ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
  476. if (ret) {
  477. pr_err("failed to set length of 'perf_callchain'\n");
  478. goto put_seq_field;
  479. }
  480. for (i = 0; i < nr_elements; i++) {
  481. struct bt_ctf_field *elem_field =
  482. bt_ctf_field_sequence_get_field(seq_field, i);
  483. ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
  484. ((u64 *)(callchain->ips))[i]);
  485. bt_ctf_field_put(elem_field);
  486. if (ret) {
  487. pr_err("failed to set callchain[%d]\n", i);
  488. goto put_seq_field;
  489. }
  490. }
  491. ret = bt_ctf_event_set_payload(event, "perf_callchain", seq_field);
  492. if (ret)
  493. pr_err("failed to set payload for raw_data\n");
  494. put_seq_field:
  495. bt_ctf_field_put(seq_field);
  496. put_seq_type:
  497. bt_ctf_field_type_put(seq_type);
  498. put_len_field:
  499. bt_ctf_field_put(len_field);
  500. put_len_type:
  501. bt_ctf_field_type_put(len_type);
  502. return ret;
  503. }
  504. static int add_generic_values(struct ctf_writer *cw,
  505. struct bt_ctf_event *event,
  506. struct perf_evsel *evsel,
  507. struct perf_sample *sample)
  508. {
  509. u64 type = evsel->attr.sample_type;
  510. int ret;
  511. /*
  512. * missing:
  513. * PERF_SAMPLE_TIME - not needed as we have it in
  514. * ctf event header
  515. * PERF_SAMPLE_READ - TODO
  516. * PERF_SAMPLE_RAW - tracepoint fields are handled separately
  517. * PERF_SAMPLE_BRANCH_STACK - TODO
  518. * PERF_SAMPLE_REGS_USER - TODO
  519. * PERF_SAMPLE_STACK_USER - TODO
  520. */
  521. if (type & PERF_SAMPLE_IP) {
  522. ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
  523. if (ret)
  524. return -1;
  525. }
  526. if (type & PERF_SAMPLE_TID) {
  527. ret = value_set_s32(cw, event, "perf_tid", sample->tid);
  528. if (ret)
  529. return -1;
  530. ret = value_set_s32(cw, event, "perf_pid", sample->pid);
  531. if (ret)
  532. return -1;
  533. }
  534. if ((type & PERF_SAMPLE_ID) ||
  535. (type & PERF_SAMPLE_IDENTIFIER)) {
  536. ret = value_set_u64(cw, event, "perf_id", sample->id);
  537. if (ret)
  538. return -1;
  539. }
  540. if (type & PERF_SAMPLE_STREAM_ID) {
  541. ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
  542. if (ret)
  543. return -1;
  544. }
  545. if (type & PERF_SAMPLE_PERIOD) {
  546. ret = value_set_u64(cw, event, "perf_period", sample->period);
  547. if (ret)
  548. return -1;
  549. }
  550. if (type & PERF_SAMPLE_WEIGHT) {
  551. ret = value_set_u64(cw, event, "perf_weight", sample->weight);
  552. if (ret)
  553. return -1;
  554. }
  555. if (type & PERF_SAMPLE_DATA_SRC) {
  556. ret = value_set_u64(cw, event, "perf_data_src",
  557. sample->data_src);
  558. if (ret)
  559. return -1;
  560. }
  561. if (type & PERF_SAMPLE_TRANSACTION) {
  562. ret = value_set_u64(cw, event, "perf_transaction",
  563. sample->transaction);
  564. if (ret)
  565. return -1;
  566. }
  567. return 0;
  568. }
  569. static int ctf_stream__flush(struct ctf_stream *cs)
  570. {
  571. int err = 0;
  572. if (cs) {
  573. err = bt_ctf_stream_flush(cs->stream);
  574. if (err)
  575. pr_err("CTF stream %d flush failed\n", cs->cpu);
  576. pr("Flush stream for cpu %d (%u samples)\n",
  577. cs->cpu, cs->count);
  578. cs->count = 0;
  579. }
  580. return err;
  581. }
/*
 * Create the CTF stream for @cpu and record the CPU number in the
 * stream packet context's "cpu_id" field.  Returns NULL on any failure
 * (all partially acquired babeltrace objects are released).
 */
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
	struct ctf_stream *cs;
	struct bt_ctf_field *pkt_ctx = NULL;
	struct bt_ctf_field *cpu_field = NULL;
	struct bt_ctf_stream *stream = NULL;
	int ret;

	cs = zalloc(sizeof(*cs));
	if (!cs) {
		pr_err("Failed to allocate ctf stream\n");
		return NULL;
	}

	stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
	if (!stream) {
		pr_err("Failed to create CTF stream\n");
		goto out;
	}

	pkt_ctx = bt_ctf_stream_get_packet_context(stream);
	if (!pkt_ctx) {
		pr_err("Failed to obtain packet context\n");
		goto out;
	}

	cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
	/* Our pkt_ctx reference is no longer needed once we hold cpu_field. */
	bt_ctf_field_put(pkt_ctx);
	if (!cpu_field) {
		pr_err("Failed to obtain cpu field\n");
		goto out;
	}

	ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
	if (ret) {
		pr_err("Failed to update CPU number\n");
		goto out;
	}

	bt_ctf_field_put(cpu_field);

	cs->cpu = cpu;
	cs->stream = stream;
	return cs;

out:
	if (cpu_field)
		bt_ctf_field_put(cpu_field);
	if (stream)
		bt_ctf_stream_put(stream);

	free(cs);
	return NULL;
}
  627. static void ctf_stream__delete(struct ctf_stream *cs)
  628. {
  629. if (cs) {
  630. bt_ctf_stream_put(cs->stream);
  631. free(cs);
  632. }
  633. }
  634. static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
  635. {
  636. struct ctf_stream *cs = cw->stream[cpu];
  637. if (!cs) {
  638. cs = ctf_stream__create(cw, cpu);
  639. cw->stream[cpu] = cs;
  640. }
  641. return cs;
  642. }
  643. static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
  644. struct perf_evsel *evsel)
  645. {
  646. int cpu = 0;
  647. if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
  648. cpu = sample->cpu;
  649. if (cpu > cw->stream_cnt) {
  650. pr_err("Event was recorded for CPU %d, limit is at %d.\n",
  651. cpu, cw->stream_cnt);
  652. cpu = 0;
  653. }
  654. return cpu;
  655. }
/* Number of appended events after which a stream is flushed. */
#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no other way to determine the
 * time for the stream flush other than keep track
 * of the number of events and check it against
 * threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
	return cs->count >= STREAM_FLUSH_COUNT;
}
  667. static int process_sample_event(struct perf_tool *tool,
  668. union perf_event *_event,
  669. struct perf_sample *sample,
  670. struct perf_evsel *evsel,
  671. struct machine *machine __maybe_unused)
  672. {
  673. struct convert *c = container_of(tool, struct convert, tool);
  674. struct evsel_priv *priv = evsel->priv;
  675. struct ctf_writer *cw = &c->writer;
  676. struct ctf_stream *cs;
  677. struct bt_ctf_event_class *event_class;
  678. struct bt_ctf_event *event;
  679. int ret;
  680. unsigned long type = evsel->attr.sample_type;
  681. if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
  682. return 0;
  683. event_class = priv->event_class;
  684. /* update stats */
  685. c->events_count++;
  686. c->events_size += _event->header.size;
  687. pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);
  688. event = bt_ctf_event_create(event_class);
  689. if (!event) {
  690. pr_err("Failed to create an CTF event\n");
  691. return -1;
  692. }
  693. bt_ctf_clock_set_time(cw->clock, sample->time);
  694. ret = add_generic_values(cw, event, evsel, sample);
  695. if (ret)
  696. return -1;
  697. if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
  698. ret = add_tracepoint_values(cw, event_class, event,
  699. evsel, sample);
  700. if (ret)
  701. return -1;
  702. }
  703. if (type & PERF_SAMPLE_CALLCHAIN) {
  704. ret = add_callchain_output_values(event_class,
  705. event, sample->callchain);
  706. if (ret)
  707. return -1;
  708. }
  709. if (perf_evsel__is_bpf_output(evsel)) {
  710. ret = add_bpf_output_values(event_class, event, sample);
  711. if (ret)
  712. return -1;
  713. }
  714. cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
  715. if (cs) {
  716. if (is_flush_needed(cs))
  717. ctf_stream__flush(cs);
  718. cs->count++;
  719. bt_ctf_stream_append_event(cs->stream, event);
  720. }
  721. bt_ctf_event_put(event);
  722. return cs ? 0 : -1;
  723. }
/*
 * Copy one field of the non-sample record (_event->_name._field) into
 * the CTF event payload using the matching value_set_<type>() helper.
 * Expands inside the __FUNC_PROCESS_NON_SAMPLE() body below, where
 * 'cw', 'event', 'ret' and '_event' are in scope.
 */
#define __NON_SAMPLE_SET_FIELD(_name, _type, _field) 			\
do {									\
	ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
	if (ret)							\
		return -1;						\
} while(0)

/*
 * Generate a process_<name>_event() perf_tool callback that converts a
 * non-sample record (comm/fork/exit/mmap/mmap2) into a CTF event on
 * stream 0, using 'body' to fill the payload, then chains to the stock
 * perf_event__process_<name>() handler.
 */
#define __FUNC_PROCESS_NON_SAMPLE(_name, body) 			\
static int process_##_name##_event(struct perf_tool *tool,		\
				   union perf_event *_event,		\
				   struct perf_sample *sample,		\
				   struct machine *machine)		\
{									\
	struct convert *c = container_of(tool, struct convert, tool);\
	struct ctf_writer *cw = &c->writer;				\
	struct bt_ctf_event_class *event_class = cw->_name##_class;\
	struct bt_ctf_event *event;					\
	struct ctf_stream *cs;						\
	int ret;							\
									\
	c->non_sample_count++;						\
	c->events_size += _event->header.size;				\
	event = bt_ctf_event_create(event_class);			\
	if (!event) {							\
		pr_err("Failed to create an CTF event\n");		\
		return -1;						\
	}								\
									\
	bt_ctf_clock_set_time(cw->clock, sample->time);			\
	body								\
	cs = ctf_stream(cw, 0);						\
	if (cs) {							\
		if (is_flush_needed(cs))				\
			ctf_stream__flush(cs);				\
									\
		cs->count++;						\
		bt_ctf_stream_append_event(cs->stream, event);		\
	}								\
	bt_ctf_event_put(event);					\
									\
	return perf_event__process_##_name(tool, _event, sample, machine);\
}
/* PERF_RECORD_COMM */
__FUNC_PROCESS_NON_SAMPLE(comm,
	__NON_SAMPLE_SET_FIELD(comm, u32, pid);
	__NON_SAMPLE_SET_FIELD(comm, u32, tid);
	__NON_SAMPLE_SET_FIELD(comm, string, comm);
)

/* PERF_RECORD_FORK */
__FUNC_PROCESS_NON_SAMPLE(fork,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

/* PERF_RECORD_EXIT: exit records carry the same fields as fork records,
 * hence the 'fork' accessor with the exit event class. */
__FUNC_PROCESS_NON_SAMPLE(exit,
	__NON_SAMPLE_SET_FIELD(fork, u32, pid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ppid);
	__NON_SAMPLE_SET_FIELD(fork, u32, tid);
	__NON_SAMPLE_SET_FIELD(fork, u32, ptid);
	__NON_SAMPLE_SET_FIELD(fork, u64, time);
)

/* PERF_RECORD_MMAP */
__FUNC_PROCESS_NON_SAMPLE(mmap,
	__NON_SAMPLE_SET_FIELD(mmap, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap, string, filename);
)

/* PERF_RECORD_MMAP2 */
__FUNC_PROCESS_NON_SAMPLE(mmap2,
	__NON_SAMPLE_SET_FIELD(mmap2, u32, pid);
	__NON_SAMPLE_SET_FIELD(mmap2, u32, tid);
	__NON_SAMPLE_SET_FIELD(mmap2, u64_hex, start);
	__NON_SAMPLE_SET_FIELD(mmap2, string, filename);
)
#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE
  798. /* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
  799. static char *change_name(char *name, char *orig_name, int dup)
  800. {
  801. char *new_name = NULL;
  802. size_t len;
  803. if (!name)
  804. name = orig_name;
  805. if (dup >= 10)
  806. goto out;
  807. /*
  808. * Add '_' prefix to potential keywork. According to
  809. * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
  810. * futher CTF spec updating may require us to use '$'.
  811. */
  812. if (dup < 0)
  813. len = strlen(name) + sizeof("_");
  814. else
  815. len = strlen(orig_name) + sizeof("_dupl_X");
  816. new_name = malloc(len);
  817. if (!new_name)
  818. goto out;
  819. if (dup < 0)
  820. snprintf(new_name, len, "_%s", name);
  821. else
  822. snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
  823. out:
  824. if (name != orig_name)
  825. free(name);
  826. return new_name;
  827. }
  828. static int event_class_add_field(struct bt_ctf_event_class *event_class,
  829. struct bt_ctf_field_type *type,
  830. struct format_field *field)
  831. {
  832. struct bt_ctf_field_type *t = NULL;
  833. char *name;
  834. int dup = 1;
  835. int ret;
  836. /* alias was already assigned */
  837. if (field->alias != field->name)
  838. return bt_ctf_event_class_add_field(event_class, type,
  839. (char *)field->alias);
  840. name = field->name;
  841. /* If 'name' is a keywork, add prefix. */
  842. if (bt_ctf_validate_identifier(name))
  843. name = change_name(name, field->name, -1);
  844. if (!name) {
  845. pr_err("Failed to fix invalid identifier.");
  846. return -1;
  847. }
  848. while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
  849. bt_ctf_field_type_put(t);
  850. name = change_name(name, field->name, dup++);
  851. if (!name) {
  852. pr_err("Failed to create dup name for '%s'\n", field->name);
  853. return -1;
  854. }
  855. }
  856. ret = bt_ctf_event_class_add_field(event_class, type, name);
  857. if (!ret)
  858. field->alias = name;
  859. return ret;
  860. }
  861. static int add_tracepoint_fields_types(struct ctf_writer *cw,
  862. struct format_field *fields,
  863. struct bt_ctf_event_class *event_class)
  864. {
  865. struct format_field *field;
  866. int ret;
  867. for (field = fields; field; field = field->next) {
  868. struct bt_ctf_field_type *type;
  869. unsigned long flags = field->flags;
  870. pr2(" field '%s'\n", field->name);
  871. type = get_tracepoint_field_type(cw, field);
  872. if (!type)
  873. return -1;
  874. /*
  875. * A string is an array of chars. For this we use the string
  876. * type and don't care that it is an array. What we don't
  877. * support is an array of strings.
  878. */
  879. if (flags & FIELD_IS_STRING)
  880. flags &= ~FIELD_IS_ARRAY;
  881. if (flags & FIELD_IS_ARRAY)
  882. type = bt_ctf_field_type_array_create(type, field->arraylen);
  883. ret = event_class_add_field(event_class, type, field);
  884. if (flags & FIELD_IS_ARRAY)
  885. bt_ctf_field_type_put(type);
  886. if (ret) {
  887. pr_err("Failed to add field '%s': %d\n",
  888. field->name, ret);
  889. return -1;
  890. }
  891. }
  892. return 0;
  893. }
  894. static int add_tracepoint_types(struct ctf_writer *cw,
  895. struct perf_evsel *evsel,
  896. struct bt_ctf_event_class *class)
  897. {
  898. struct format_field *common_fields = evsel->tp_format->format.common_fields;
  899. struct format_field *fields = evsel->tp_format->format.fields;
  900. int ret;
  901. ret = add_tracepoint_fields_types(cw, common_fields, class);
  902. if (!ret)
  903. ret = add_tracepoint_fields_types(cw, fields, class);
  904. return ret;
  905. }
  906. static int add_bpf_output_types(struct ctf_writer *cw,
  907. struct bt_ctf_event_class *class)
  908. {
  909. struct bt_ctf_field_type *len_type = cw->data.u32;
  910. struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
  911. struct bt_ctf_field_type *seq_type;
  912. int ret;
  913. ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
  914. if (ret)
  915. return ret;
  916. seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
  917. if (!seq_type)
  918. return -1;
  919. return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
  920. }
  921. static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
  922. struct bt_ctf_event_class *event_class)
  923. {
  924. u64 type = evsel->attr.sample_type;
  925. /*
  926. * missing:
  927. * PERF_SAMPLE_TIME - not needed as we have it in
  928. * ctf event header
  929. * PERF_SAMPLE_READ - TODO
  930. * PERF_SAMPLE_CALLCHAIN - TODO
  931. * PERF_SAMPLE_RAW - tracepoint fields and BPF output
  932. * are handled separately
  933. * PERF_SAMPLE_BRANCH_STACK - TODO
  934. * PERF_SAMPLE_REGS_USER - TODO
  935. * PERF_SAMPLE_STACK_USER - TODO
  936. */
  937. #define ADD_FIELD(cl, t, n) \
  938. do { \
  939. pr2(" field '%s'\n", n); \
  940. if (bt_ctf_event_class_add_field(cl, t, n)) { \
  941. pr_err("Failed to add field '%s';\n", n); \
  942. return -1; \
  943. } \
  944. } while (0)
  945. if (type & PERF_SAMPLE_IP)
  946. ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");
  947. if (type & PERF_SAMPLE_TID) {
  948. ADD_FIELD(event_class, cw->data.s32, "perf_tid");
  949. ADD_FIELD(event_class, cw->data.s32, "perf_pid");
  950. }
  951. if ((type & PERF_SAMPLE_ID) ||
  952. (type & PERF_SAMPLE_IDENTIFIER))
  953. ADD_FIELD(event_class, cw->data.u64, "perf_id");
  954. if (type & PERF_SAMPLE_STREAM_ID)
  955. ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
  956. if (type & PERF_SAMPLE_PERIOD)
  957. ADD_FIELD(event_class, cw->data.u64, "perf_period");
  958. if (type & PERF_SAMPLE_WEIGHT)
  959. ADD_FIELD(event_class, cw->data.u64, "perf_weight");
  960. if (type & PERF_SAMPLE_DATA_SRC)
  961. ADD_FIELD(event_class, cw->data.u64, "perf_data_src");
  962. if (type & PERF_SAMPLE_TRANSACTION)
  963. ADD_FIELD(event_class, cw->data.u64, "perf_transaction");
  964. if (type & PERF_SAMPLE_CALLCHAIN) {
  965. ADD_FIELD(event_class, cw->data.u32, "perf_callchain_size");
  966. ADD_FIELD(event_class,
  967. bt_ctf_field_type_sequence_create(
  968. cw->data.u64_hex, "perf_callchain_size"),
  969. "perf_callchain");
  970. }
  971. #undef ADD_FIELD
  972. return 0;
  973. }
  974. static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
  975. {
  976. struct bt_ctf_event_class *event_class;
  977. struct evsel_priv *priv;
  978. const char *name = perf_evsel__name(evsel);
  979. int ret;
  980. pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);
  981. event_class = bt_ctf_event_class_create(name);
  982. if (!event_class)
  983. return -1;
  984. ret = add_generic_types(cw, evsel, event_class);
  985. if (ret)
  986. goto err;
  987. if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
  988. ret = add_tracepoint_types(cw, evsel, event_class);
  989. if (ret)
  990. goto err;
  991. }
  992. if (perf_evsel__is_bpf_output(evsel)) {
  993. ret = add_bpf_output_types(cw, event_class);
  994. if (ret)
  995. goto err;
  996. }
  997. ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
  998. if (ret) {
  999. pr("Failed to add event class into stream.\n");
  1000. goto err;
  1001. }
  1002. priv = malloc(sizeof(*priv));
  1003. if (!priv)
  1004. goto err;
  1005. priv->event_class = event_class;
  1006. evsel->priv = priv;
  1007. return 0;
  1008. err:
  1009. bt_ctf_event_class_put(event_class);
  1010. pr_err("Failed to add event '%s'.\n", name);
  1011. return -1;
  1012. }
  1013. static int setup_events(struct ctf_writer *cw, struct perf_session *session)
  1014. {
  1015. struct perf_evlist *evlist = session->evlist;
  1016. struct perf_evsel *evsel;
  1017. int ret;
  1018. evlist__for_each_entry(evlist, evsel) {
  1019. ret = add_event(cw, evsel);
  1020. if (ret)
  1021. return ret;
  1022. }
  1023. return 0;
  1024. }
/*
 * Add field 'n' with the cached CTF type cw->data.<t> to the event class
 * being built.  Expands only inside __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
 * bodies, where 'cw' and 'event_class' are in scope; on failure it returns
 * -1 from the enclosing add_*_event() function.
 */
#define __NON_SAMPLE_ADD_FIELD(t, n)						\
	do {								\
		pr2(" field '%s'\n", #n);				\
		if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
			pr_err("Failed to add field '%s';\n", #n);\
			return -1;					\
		}							\
	} while(0)
  1033. #define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body) \
  1034. static int add_##_name##_event(struct ctf_writer *cw) \
  1035. { \
  1036. struct bt_ctf_event_class *event_class; \
  1037. int ret; \
  1038. \
  1039. pr("Adding "#_name" event\n"); \
  1040. event_class = bt_ctf_event_class_create("perf_" #_name);\
  1041. if (!event_class) \
  1042. return -1; \
  1043. body \
  1044. \
  1045. ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
  1046. if (ret) { \
  1047. pr("Failed to add event class '"#_name"' into stream.\n");\
  1048. return ret; \
  1049. } \
  1050. \
  1051. cw->_name##_class = event_class; \
  1052. bt_ctf_event_class_put(event_class); \
  1053. return 0; \
  1054. }
/* perf_comm: pid/tid plus the comm string. */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(string, comm);
)

/* perf_fork: parent/child pid+tid pair and a timestamp. */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

/* perf_exit: same field set as perf_fork (see process_exit_event above). */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, ppid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u32, ptid);
	__NON_SAMPLE_ADD_FIELD(u64, time);
)

/* perf_mmap: owning pid/tid, mapping start (hex) and file name. */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
	__NON_SAMPLE_ADD_FIELD(string, filename);
)

/* perf_mmap2: same field set as perf_mmap. */
__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(mmap2,
	__NON_SAMPLE_ADD_FIELD(u32, pid);
	__NON_SAMPLE_ADD_FIELD(u32, tid);
	__NON_SAMPLE_ADD_FIELD(u64_hex, start);
	__NON_SAMPLE_ADD_FIELD(string, filename);
)
#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS
  1088. static int setup_non_sample_events(struct ctf_writer *cw,
  1089. struct perf_session *session __maybe_unused)
  1090. {
  1091. int ret;
  1092. ret = add_comm_event(cw);
  1093. if (ret)
  1094. return ret;
  1095. ret = add_exit_event(cw);
  1096. if (ret)
  1097. return ret;
  1098. ret = add_fork_event(cw);
  1099. if (ret)
  1100. return ret;
  1101. ret = add_mmap_event(cw);
  1102. if (ret)
  1103. return ret;
  1104. ret = add_mmap2_event(cw);
  1105. if (ret)
  1106. return ret;
  1107. return 0;
  1108. }
  1109. static void cleanup_events(struct perf_session *session)
  1110. {
  1111. struct perf_evlist *evlist = session->evlist;
  1112. struct perf_evsel *evsel;
  1113. evlist__for_each_entry(evlist, evsel) {
  1114. struct evsel_priv *priv;
  1115. priv = evsel->priv;
  1116. bt_ctf_event_class_put(priv->event_class);
  1117. zfree(&evsel->priv);
  1118. }
  1119. perf_evlist__delete(evlist);
  1120. session->evlist = NULL;
  1121. }
  1122. static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
  1123. {
  1124. struct ctf_stream **stream;
  1125. struct perf_header *ph = &session->header;
  1126. int ncpus;
  1127. /*
  1128. * Try to get the number of cpus used in the data file,
  1129. * if not present fallback to the MAX_CPUS.
  1130. */
  1131. ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
  1132. stream = zalloc(sizeof(*stream) * ncpus);
  1133. if (!stream) {
  1134. pr_err("Failed to allocate streams.\n");
  1135. return -ENOMEM;
  1136. }
  1137. cw->stream = stream;
  1138. cw->stream_cnt = ncpus;
  1139. return 0;
  1140. }
  1141. static void free_streams(struct ctf_writer *cw)
  1142. {
  1143. int cpu;
  1144. for (cpu = 0; cpu < cw->stream_cnt; cpu++)
  1145. ctf_stream__delete(cw->stream[cpu]);
  1146. free(cw->stream);
  1147. }
  1148. static int ctf_writer__setup_env(struct ctf_writer *cw,
  1149. struct perf_session *session)
  1150. {
  1151. struct perf_header *header = &session->header;
  1152. struct bt_ctf_writer *writer = cw->writer;
  1153. #define ADD(__n, __v) \
  1154. do { \
  1155. if (bt_ctf_writer_add_environment_field(writer, __n, __v)) \
  1156. return -1; \
  1157. } while (0)
  1158. ADD("host", header->env.hostname);
  1159. ADD("sysname", "Linux");
  1160. ADD("release", header->env.os_release);
  1161. ADD("version", header->env.version);
  1162. ADD("machine", header->env.arch);
  1163. ADD("domain", "kernel");
  1164. ADD("tracer_name", "perf");
  1165. #undef ADD
  1166. return 0;
  1167. }
  1168. static int ctf_writer__setup_clock(struct ctf_writer *cw)
  1169. {
  1170. struct bt_ctf_clock *clock = cw->clock;
  1171. bt_ctf_clock_set_description(clock, "perf clock");
  1172. #define SET(__n, __v) \
  1173. do { \
  1174. if (bt_ctf_clock_set_##__n(clock, __v)) \
  1175. return -1; \
  1176. } while (0)
  1177. SET(frequency, 1000000000);
  1178. SET(offset_s, 0);
  1179. SET(offset, 0);
  1180. SET(precision, 10);
  1181. SET(is_absolute, 0);
  1182. #undef SET
  1183. return 0;
  1184. }
  1185. static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
  1186. {
  1187. struct bt_ctf_field_type *type;
  1188. type = bt_ctf_field_type_integer_create(size);
  1189. if (!type)
  1190. return NULL;
  1191. if (sign &&
  1192. bt_ctf_field_type_integer_set_signed(type, 1))
  1193. goto err;
  1194. if (hex &&
  1195. bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
  1196. goto err;
  1197. #if __BYTE_ORDER == __BIG_ENDIAN
  1198. bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
  1199. #else
  1200. bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
  1201. #endif
  1202. pr2("Created type: INTEGER %d-bit %ssigned %s\n",
  1203. size, sign ? "un" : "", hex ? "hex" : "");
  1204. return type;
  1205. err:
  1206. bt_ctf_field_type_put(type);
  1207. return NULL;
  1208. }
  1209. static void ctf_writer__cleanup_data(struct ctf_writer *cw)
  1210. {
  1211. unsigned int i;
  1212. for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
  1213. bt_ctf_field_type_put(cw->data.array[i]);
  1214. }
  1215. static int ctf_writer__init_data(struct ctf_writer *cw)
  1216. {
  1217. #define CREATE_INT_TYPE(type, size, sign, hex) \
  1218. do { \
  1219. (type) = create_int_type(size, sign, hex); \
  1220. if (!(type)) \
  1221. goto err; \
  1222. } while (0)
  1223. CREATE_INT_TYPE(cw->data.s64, 64, true, false);
  1224. CREATE_INT_TYPE(cw->data.u64, 64, false, false);
  1225. CREATE_INT_TYPE(cw->data.s32, 32, true, false);
  1226. CREATE_INT_TYPE(cw->data.u32, 32, false, false);
  1227. CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
  1228. CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);
  1229. cw->data.string = bt_ctf_field_type_string_create();
  1230. if (cw->data.string)
  1231. return 0;
  1232. err:
  1233. ctf_writer__cleanup_data(cw);
  1234. pr_err("Failed to create data types.\n");
  1235. return -1;
  1236. }
/*
 * Release everything acquired by ctf_writer__init()/init_data() and zero
 * the writer so a later cleanup of an untouched field is harmless.
 */
static void ctf_writer__cleanup(struct ctf_writer *cw)
{
	ctf_writer__cleanup_data(cw);

	bt_ctf_clock_put(cw->clock);
	free_streams(cw);
	bt_ctf_stream_class_put(cw->stream_class);
	bt_ctf_writer_put(cw->writer);

	/* and NULL all the pointers */
	memset(cw, 0, sizeof(*cw));
}
/*
 * Create and wire up the CTF writer for output directory 'path':
 * writer, clock, stream class, the cached field types and the per-packet
 * "cpu_id" context field.  Returns 0 on success, -1 on failure (everything
 * partially created is torn down via ctf_writer__cleanup()).
 */
static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
	struct bt_ctf_writer *writer;
	struct bt_ctf_stream_class *stream_class;
	struct bt_ctf_clock *clock;
	struct bt_ctf_field_type *pkt_ctx_type;
	int ret;

	/* CTF writer */
	writer = bt_ctf_writer_create(path);
	if (!writer)
		goto err;

	cw->writer = writer;

	/* CTF clock */
	clock = bt_ctf_clock_create("perf_clock");
	if (!clock) {
		pr("Failed to create CTF clock.\n");
		goto err_cleanup;
	}

	cw->clock = clock;

	if (ctf_writer__setup_clock(cw)) {
		pr("Failed to setup CTF clock.\n");
		goto err_cleanup;
	}

	/* CTF stream class */
	stream_class = bt_ctf_stream_class_create("perf_stream");
	if (!stream_class) {
		pr("Failed to create CTF stream class.\n");
		goto err_cleanup;
	}

	cw->stream_class = stream_class;

	/* CTF clock stream setup */
	if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
		pr("Failed to assign CTF clock to stream class.\n");
		goto err_cleanup;
	}

	if (ctf_writer__init_data(cw))
		goto err_cleanup;

	/* Add cpu_id for packet context */
	pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
	if (!pkt_ctx_type)
		goto err_cleanup;

	/* the get above took a reference; drop it once the field is added */
	ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
	bt_ctf_field_type_put(pkt_ctx_type);
	if (ret)
		goto err_cleanup;

	/* CTF clock writer setup */
	if (bt_ctf_writer_add_clock(writer, clock)) {
		pr("Failed to assign CTF clock to writer.\n");
		goto err_cleanup;
	}

	return 0;

err_cleanup:
	ctf_writer__cleanup(cw);
err:
	pr_err("Failed to setup CTF writer.\n");
	return -1;
}
  1304. static int ctf_writer__flush_streams(struct ctf_writer *cw)
  1305. {
  1306. int cpu, ret = 0;
  1307. for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
  1308. ret = ctf_stream__flush(cw->stream[cpu]);
  1309. return ret;
  1310. }
  1311. static int convert__config(const char *var, const char *value, void *cb)
  1312. {
  1313. struct convert *c = cb;
  1314. if (!strcmp(var, "convert.queue-size"))
  1315. return perf_config_u64(&c->queue_size, var, value);
  1316. return 0;
  1317. }
/*
 * Convert the perf.data file 'input' into a CTF trace written under the
 * directory 'path'.  With opts->all set, non-sample events
 * (comm/exit/fork/mmap/mmap2) are converted as dedicated CTF events too.
 * Returns 0 on success, a negative value on error.
 */
int bt_convert__perf2ctf(const char *input, const char *path,
			 struct perf_data_convert_opts *opts)
{
	struct perf_session *session;
	struct perf_data data = {
		.file.path = input,
		.mode      = PERF_DATA_MODE_READ,
		.force     = opts->force,
	};
	struct convert c = {
		.tool = {
			.sample          = process_sample_event,
			.mmap            = perf_event__process_mmap,
			.mmap2           = perf_event__process_mmap2,
			.comm            = perf_event__process_comm,
			.exit            = perf_event__process_exit,
			.fork            = perf_event__process_fork,
			.lost            = perf_event__process_lost,
			.tracing_data    = perf_event__process_tracing_data,
			.build_id        = perf_event__process_build_id,
			.namespaces      = perf_event__process_namespaces,
			.ordered_events  = true,
			.ordering_requires_timestamps = true,
		},
	};
	struct ctf_writer *cw = &c.writer;
	int err;

	/* in --all mode, route non-sample events through the CTF emitters */
	if (opts->all) {
		c.tool.comm = process_comm_event;
		c.tool.exit = process_exit_event;
		c.tool.fork = process_fork_event;
		c.tool.mmap = process_mmap_event;
		c.tool.mmap2 = process_mmap2_event;
	}

	err = perf_config(convert__config, &c);
	if (err)
		return err;

	/* CTF writer */
	if (ctf_writer__init(cw, path))
		return -1;

	err = -1;
	/* perf.data session */
	session = perf_session__new(&data, 0, &c.tool);
	if (!session)
		goto free_writer;

	/* optional ordered-events queue limit from "convert.queue-size" */
	if (c.queue_size) {
		ordered_events__set_alloc_size(&session->ordered_events,
					       c.queue_size);
	}

	/* CTF writer env/clock setup */
	if (ctf_writer__setup_env(cw, session))
		goto free_session;

	/* CTF events setup */
	if (setup_events(cw, session))
		goto free_session;

	if (opts->all && setup_non_sample_events(cw, session))
		goto free_session;

	if (setup_streams(cw, session))
		goto free_session;

	err = perf_session__process_events(session);
	if (!err)
		err = ctf_writer__flush_streams(cw);
	else
		pr_err("Error during conversion.\n");

	/* summary is printed even when processing failed */
	fprintf(stderr,
		"[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
		data.file.path, path);

	fprintf(stderr,
		"[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
		(double) c.events_size / 1024.0 / 1024.0,
		c.events_count);

	if (!c.non_sample_count)
		fprintf(stderr, ") ]\n");
	else
		fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

	cleanup_events(session);
	perf_session__delete(session);
	ctf_writer__cleanup(cw);

	return err;

free_session:
	perf_session__delete(session);
free_writer:
	ctf_writer__cleanup(cw);
	pr_err("Error during conversion setup.\n");
	return err;
}