data-convert-bt.c

/*
 * CTF writing support via babeltrace.
 *
 * Copyright (C) 2014, Jiri Olsa <jolsa@redhat.com>
 * Copyright (C) 2014, Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <linux/compiler.h>
#include <babeltrace/ctf-writer/writer.h>
#include <babeltrace/ctf-writer/clock.h>
#include <babeltrace/ctf-writer/stream.h>
#include <babeltrace/ctf-writer/event.h>
#include <babeltrace/ctf-writer/event-types.h>
#include <babeltrace/ctf-writer/event-fields.h>
#include <babeltrace/ctf-ir/utils.h>
#include <babeltrace/ctf/events.h>
#include <traceevent/event-parse.h>
#include "asm/bug.h"
#include "data-convert-bt.h"
#include "session.h"
#include "util.h"
#include "debug.h"
#include "tool.h"
#include "evlist.h"
#include "evsel.h"
#include "machine.h"
#include "config.h"

#define pr_N(n, fmt, ...) \
        eprintf(n, debug_data_convert, fmt, ##__VA_ARGS__)

#define pr(fmt, ...)  pr_N(1, pr_fmt(fmt), ##__VA_ARGS__)
#define pr2(fmt, ...) pr_N(2, pr_fmt(fmt), ##__VA_ARGS__)

#define pr_time2(t, fmt, ...) pr_time_N(2, debug_data_convert, t, pr_fmt(fmt), ##__VA_ARGS__)

struct evsel_priv {
        struct bt_ctf_event_class *event_class;
};

#define MAX_CPUS 4096

struct ctf_stream {
        struct bt_ctf_stream *stream;
        int cpu;
        u32 count;
};

struct ctf_writer {
        /* writer primitives */
        struct bt_ctf_writer *writer;
        struct ctf_stream **stream;
        int stream_cnt;
        struct bt_ctf_stream_class *stream_class;
        struct bt_ctf_clock *clock;

        /* data types */
        union {
                struct {
                        struct bt_ctf_field_type *s64;
                        struct bt_ctf_field_type *u64;
                        struct bt_ctf_field_type *s32;
                        struct bt_ctf_field_type *u32;
                        struct bt_ctf_field_type *string;
                        struct bt_ctf_field_type *u32_hex;
                        struct bt_ctf_field_type *u64_hex;
                };
                struct bt_ctf_field_type *array[6];
        } data;
        struct bt_ctf_event_class *comm_class;
        struct bt_ctf_event_class *exit_class;
        struct bt_ctf_event_class *fork_class;
};

struct convert {
        struct perf_tool tool;
        struct ctf_writer writer;

        u64 events_size;
        u64 events_count;
        u64 non_sample_count;

        /* Ordered events configured queue size. */
        u64 queue_size;
};
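
/*
 * Create an integer field of the given type, set it to @val (signed or
 * unsigned according to the type) and attach it to @event's payload
 * under @name.
 */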
static int value_set(struct bt_ctf_field_type *type,
                     struct bt_ctf_event *event,
                     const char *name, u64 val)
{
        struct bt_ctf_field *field;
        bool sign = bt_ctf_field_type_integer_get_signed(type);
        int ret;

        field = bt_ctf_field_create(type);
        if (!field) {
                pr_err("failed to create a field %s\n", name);
                return -1;
        }

        if (sign) {
                ret = bt_ctf_field_signed_integer_set_value(field, val);
                if (ret) {
                        pr_err("failed to set field value %s\n", name);
                        goto err;
                }
        } else {
                ret = bt_ctf_field_unsigned_integer_set_value(field, val);
                if (ret) {
                        pr_err("failed to set field value %s\n", name);
                        goto err;
                }
        }

        ret = bt_ctf_event_set_payload(event, name, field);
        if (ret) {
                pr_err("failed to set payload %s\n", name);
                goto err;
        }

        pr2("  SET [%s = %" PRIu64 "]\n", name, val);

err:
        bt_ctf_field_put(field);
        return ret;
}

#define __FUNC_VALUE_SET(_name, _val_type)                              \
static __maybe_unused int value_set_##_name(struct ctf_writer *cw,     \
                             struct bt_ctf_event *event,               \
                             const char *name,                         \
                             _val_type val)                            \
{                                                                      \
        struct bt_ctf_field_type *type = cw->data._name;               \
        return value_set(type, event, name, (u64) val);                \
}

#define FUNC_VALUE_SET(_name) __FUNC_VALUE_SET(_name, _name)

FUNC_VALUE_SET(s32)
FUNC_VALUE_SET(u32)
FUNC_VALUE_SET(s64)
FUNC_VALUE_SET(u64)
__FUNC_VALUE_SET(u64_hex, u64)

static int string_set_value(struct bt_ctf_field *field, const char *string);

static __maybe_unused int
value_set_string(struct ctf_writer *cw, struct bt_ctf_event *event,
                 const char *name, const char *string)
{
        struct bt_ctf_field_type *type = cw->data.string;
        struct bt_ctf_field *field;
        int ret = 0;

        field = bt_ctf_field_create(type);
        if (!field) {
                pr_err("failed to create a field %s\n", name);
                return -1;
        }

        ret = string_set_value(field, string);
        if (ret) {
                pr_err("failed to set value %s\n", name);
                goto err_put_field;
        }

        ret = bt_ctf_event_set_payload(event, name, field);
        if (ret)
                pr_err("failed to set payload %s\n", name);

err_put_field:
        bt_ctf_field_put(field);
        return ret;
}
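
/*
 * Map a libtraceevent field description onto one of the pre-created
 * CTF field types: string, hex u64 for pointer-like unsigned longs,
 * or signed/unsigned 32/64-bit integers.
 */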
static struct bt_ctf_field_type*
get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
{
        unsigned long flags = field->flags;

        if (flags & FIELD_IS_STRING)
                return cw->data.string;

        if (!(flags & FIELD_IS_SIGNED)) {
                /* unsigned longs are mostly pointers */
                if (flags & FIELD_IS_LONG || flags & FIELD_IS_POINTER)
                        return cw->data.u64_hex;
        }

        if (flags & FIELD_IS_SIGNED) {
                if (field->size == 8)
                        return cw->data.s64;
                else
                        return cw->data.s32;
        }

        if (field->size == 8)
                return cw->data.u64;
        else
                return cw->data.u32;
}
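
/*
 * Sign-extend a raw tracepoint value of @size bytes so that negative
 * values survive the conversion to a 64-bit signed integer.
 */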
static unsigned long long adjust_signedness(unsigned long long value_int, int size)
{
        unsigned long long value_mask;

        /*
         * value_mask = (1 << (size * 8 - 1)) - 1.
         * Directly set value_mask for code readers.
         */
        switch (size) {
        case 1:
                value_mask = 0x7fULL;
                break;
        case 2:
                value_mask = 0x7fffULL;
                break;
        case 4:
                value_mask = 0x7fffffffULL;
                break;
        case 8:
                /*
                 * For a 64-bit value, return it as is. There is no need
                 * to fill the high bits.
                 */
                /* Fall through */
        default:
                /* BUG! */
                return value_int;
        }

        /* If it is a positive value, don't adjust. */
        if ((value_int & (~0ULL - value_mask)) == 0)
                return value_int;

        /* Fill the upper part of value_int with 1s to make it a negative long long. */
        return (value_int & value_mask) | ~value_mask;
}
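
/*
 * Store @string into a CTF string field, escaping any non-printable
 * characters as "\xNN".
 */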
static int string_set_value(struct bt_ctf_field *field, const char *string)
{
        char *buffer = NULL;
        size_t len = strlen(string), i, p;
        int err;

        for (i = p = 0; i < len; i++, p++) {
                if (isprint(string[i])) {
                        if (!buffer)
                                continue;
                        buffer[p] = string[i];
                } else {
                        char numstr[5];

                        snprintf(numstr, sizeof(numstr), "\\x%02x",
                                 (unsigned int)(string[i]) & 0xff);

                        if (!buffer) {
                                buffer = zalloc(i + (len - i) * 4 + 2);
                                if (!buffer) {
                                        pr_err("failed to set unprintable string '%s'\n", string);
                                        return bt_ctf_field_string_set_value(field, "UNPRINTABLE-STRING");
                                }
                                if (i > 0)
                                        strncpy(buffer, string, i);
                        }
                        strncat(buffer + p, numstr, 4);
                        p += 3;
                }
        }

        if (!buffer)
                return bt_ctf_field_string_set_value(field, string);
        err = bt_ctf_field_string_set_value(field, buffer);
        free(buffer);
        return err;
}
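
/*
 * Fill one tracepoint field (scalar, string or array) from the raw
 * sample data into the CTF event payload.
 */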
static int add_tracepoint_field_value(struct ctf_writer *cw,
                                      struct bt_ctf_event_class *event_class,
                                      struct bt_ctf_event *event,
                                      struct perf_sample *sample,
                                      struct format_field *fmtf)
{
        struct bt_ctf_field_type *type;
        struct bt_ctf_field *array_field;
        struct bt_ctf_field *field;
        const char *name = fmtf->name;
        void *data = sample->raw_data;
        unsigned long flags = fmtf->flags;
        unsigned int n_items;
        unsigned int i;
        unsigned int offset;
        unsigned int len;
        int ret;

        name = fmtf->alias;
        offset = fmtf->offset;
        len = fmtf->size;

        if (flags & FIELD_IS_STRING)
                flags &= ~FIELD_IS_ARRAY;

        if (flags & FIELD_IS_DYNAMIC) {
                unsigned long long tmp_val;

                tmp_val = pevent_read_number(fmtf->event->pevent,
                                             data + offset, len);
                offset = tmp_val;
                len = offset >> 16;
                offset &= 0xffff;
        }

        if (flags & FIELD_IS_ARRAY) {
                type = bt_ctf_event_class_get_field_by_name(
                                event_class, name);
                array_field = bt_ctf_field_create(type);
                bt_ctf_field_type_put(type);
                if (!array_field) {
                        pr_err("Failed to create array type %s\n", name);
                        return -1;
                }

                len = fmtf->size / fmtf->arraylen;
                n_items = fmtf->arraylen;
        } else {
                n_items = 1;
                array_field = NULL;
        }

        type = get_tracepoint_field_type(cw, fmtf);

        for (i = 0; i < n_items; i++) {
                if (flags & FIELD_IS_ARRAY)
                        field = bt_ctf_field_array_get_field(array_field, i);
                else
                        field = bt_ctf_field_create(type);

                if (!field) {
                        pr_err("failed to create a field %s\n", name);
                        return -1;
                }

                if (flags & FIELD_IS_STRING)
                        ret = string_set_value(field, data + offset + i * len);
                else {
                        unsigned long long value_int;

                        value_int = pevent_read_number(
                                        fmtf->event->pevent,
                                        data + offset + i * len, len);

                        if (!(flags & FIELD_IS_SIGNED))
                                ret = bt_ctf_field_unsigned_integer_set_value(
                                                field, value_int);
                        else
                                ret = bt_ctf_field_signed_integer_set_value(
                                                field, adjust_signedness(value_int, len));
                }

                if (ret) {
                        pr_err("failed to set field value %s\n", name);
                        goto err_put_field;
                }

                if (!(flags & FIELD_IS_ARRAY)) {
                        ret = bt_ctf_event_set_payload(event, name, field);
                        if (ret) {
                                pr_err("failed to set payload %s\n", name);
                                goto err_put_field;
                        }
                }
                bt_ctf_field_put(field);
        }

        if (flags & FIELD_IS_ARRAY) {
                ret = bt_ctf_event_set_payload(event, name, array_field);
                if (ret) {
                        pr_err("Failed to add payload array %s\n", name);
                        return -1;
                }
                bt_ctf_field_put(array_field);
        }
        return 0;

err_put_field:
        bt_ctf_field_put(field);
        return -1;
}

static int add_tracepoint_fields_values(struct ctf_writer *cw,
                                        struct bt_ctf_event_class *event_class,
                                        struct bt_ctf_event *event,
                                        struct format_field *fields,
                                        struct perf_sample *sample)
{
        struct format_field *field;
        int ret;

        for (field = fields; field; field = field->next) {
                ret = add_tracepoint_field_value(cw, event_class, event, sample,
                                                 field);
                if (ret)
                        return -1;
        }
        return 0;
}

static int add_tracepoint_values(struct ctf_writer *cw,
                                 struct bt_ctf_event_class *event_class,
                                 struct bt_ctf_event *event,
                                 struct perf_evsel *evsel,
                                 struct perf_sample *sample)
{
        struct format_field *common_fields = evsel->tp_format->format.common_fields;
        struct format_field *fields = evsel->tp_format->format.fields;
        int ret;

        ret = add_tracepoint_fields_values(cw, event_class, event,
                                           common_fields, sample);
        if (!ret)
                ret = add_tracepoint_fields_values(cw, event_class, event,
                                                   fields, sample);

        return ret;
}
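
/*
 * BPF output events carry an opaque byte stream; emit it as a
 * "raw_len"-counted sequence of u32 words under "raw_data".
 */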
static int
add_bpf_output_values(struct bt_ctf_event_class *event_class,
                      struct bt_ctf_event *event,
                      struct perf_sample *sample)
{
        struct bt_ctf_field_type *len_type, *seq_type;
        struct bt_ctf_field *len_field, *seq_field;
        unsigned int raw_size = sample->raw_size;
        unsigned int nr_elements = raw_size / sizeof(u32);
        unsigned int i;
        int ret;

        if (nr_elements * sizeof(u32) != raw_size)
                pr_warning("Incorrect raw_size (%u) in bpf output event, skip %zu bytes\n",
                           raw_size, raw_size - nr_elements * sizeof(u32));

        len_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_len");
        len_field = bt_ctf_field_create(len_type);
        if (!len_field) {
                pr_err("failed to create 'raw_len' for bpf output event\n");
                ret = -1;
                goto put_len_type;
        }

        ret = bt_ctf_field_unsigned_integer_set_value(len_field, nr_elements);
        if (ret) {
                pr_err("failed to set field value for raw_len\n");
                goto put_len_field;
        }
        ret = bt_ctf_event_set_payload(event, "raw_len", len_field);
        if (ret) {
                pr_err("failed to set payload to raw_len\n");
                goto put_len_field;
        }

        seq_type = bt_ctf_event_class_get_field_by_name(event_class, "raw_data");
        seq_field = bt_ctf_field_create(seq_type);
        if (!seq_field) {
                pr_err("failed to create 'raw_data' for bpf output event\n");
                ret = -1;
                goto put_seq_type;
        }

        ret = bt_ctf_field_sequence_set_length(seq_field, len_field);
        if (ret) {
                pr_err("failed to set length of 'raw_data'\n");
                goto put_seq_field;
        }

        for (i = 0; i < nr_elements; i++) {
                struct bt_ctf_field *elem_field =
                        bt_ctf_field_sequence_get_field(seq_field, i);

                ret = bt_ctf_field_unsigned_integer_set_value(elem_field,
                                ((u32 *)(sample->raw_data))[i]);

                bt_ctf_field_put(elem_field);
                if (ret) {
                        pr_err("failed to set raw_data[%d]\n", i);
                        goto put_seq_field;
                }
        }

        ret = bt_ctf_event_set_payload(event, "raw_data", seq_field);
        if (ret)
                pr_err("failed to set payload for raw_data\n");

put_seq_field:
        bt_ctf_field_put(seq_field);
put_seq_type:
        bt_ctf_field_type_put(seq_type);
put_len_field:
        bt_ctf_field_put(len_field);
put_len_type:
        bt_ctf_field_type_put(len_type);
        return ret;
}
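
/*
 * Add the generic perf_* payload members (ip, tid/pid, id, period, ...)
 * that were requested in the evsel's sample_type.
 */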
static int add_generic_values(struct ctf_writer *cw,
                              struct bt_ctf_event *event,
                              struct perf_evsel *evsel,
                              struct perf_sample *sample)
{
        u64 type = evsel->attr.sample_type;
        int ret;

        /*
         * missing:
         *   PERF_SAMPLE_TIME         - not needed as we have it in
         *                              ctf event header
         *   PERF_SAMPLE_READ         - TODO
         *   PERF_SAMPLE_CALLCHAIN    - TODO
         *   PERF_SAMPLE_RAW          - tracepoint fields are handled separately
         *   PERF_SAMPLE_BRANCH_STACK - TODO
         *   PERF_SAMPLE_REGS_USER    - TODO
         *   PERF_SAMPLE_STACK_USER   - TODO
         */

        if (type & PERF_SAMPLE_IP) {
                ret = value_set_u64_hex(cw, event, "perf_ip", sample->ip);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_TID) {
                ret = value_set_s32(cw, event, "perf_tid", sample->tid);
                if (ret)
                        return -1;

                ret = value_set_s32(cw, event, "perf_pid", sample->pid);
                if (ret)
                        return -1;
        }

        if ((type & PERF_SAMPLE_ID) ||
            (type & PERF_SAMPLE_IDENTIFIER)) {
                ret = value_set_u64(cw, event, "perf_id", sample->id);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_STREAM_ID) {
                ret = value_set_u64(cw, event, "perf_stream_id", sample->stream_id);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_PERIOD) {
                ret = value_set_u64(cw, event, "perf_period", sample->period);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_WEIGHT) {
                ret = value_set_u64(cw, event, "perf_weight", sample->weight);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_DATA_SRC) {
                ret = value_set_u64(cw, event, "perf_data_src",
                                    sample->data_src);
                if (ret)
                        return -1;
        }

        if (type & PERF_SAMPLE_TRANSACTION) {
                ret = value_set_u64(cw, event, "perf_transaction",
                                    sample->transaction);
                if (ret)
                        return -1;
        }

        return 0;
}

static int ctf_stream__flush(struct ctf_stream *cs)
{
        int err = 0;

        if (cs) {
                err = bt_ctf_stream_flush(cs->stream);
                if (err)
                        pr_err("CTF stream %d flush failed\n", cs->cpu);

                pr("Flush stream for cpu %d (%u samples)\n",
                   cs->cpu, cs->count);

                cs->count = 0;
        }

        return err;
}
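
/*
 * Create a per-CPU CTF stream and record the CPU number in its
 * packet context ("cpu_id").
 */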
static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
{
        struct ctf_stream *cs;
        struct bt_ctf_field *pkt_ctx = NULL;
        struct bt_ctf_field *cpu_field = NULL;
        struct bt_ctf_stream *stream = NULL;
        int ret;

        cs = zalloc(sizeof(*cs));
        if (!cs) {
                pr_err("Failed to allocate ctf stream\n");
                return NULL;
        }

        stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
        if (!stream) {
                pr_err("Failed to create CTF stream\n");
                goto out;
        }

        pkt_ctx = bt_ctf_stream_get_packet_context(stream);
        if (!pkt_ctx) {
                pr_err("Failed to obtain packet context\n");
                goto out;
        }

        cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
        bt_ctf_field_put(pkt_ctx);
        if (!cpu_field) {
                pr_err("Failed to obtain cpu field\n");
                goto out;
        }

        ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
        if (ret) {
                pr_err("Failed to update CPU number\n");
                goto out;
        }

        bt_ctf_field_put(cpu_field);

        cs->cpu = cpu;
        cs->stream = stream;
        return cs;

out:
        if (cpu_field)
                bt_ctf_field_put(cpu_field);
        if (stream)
                bt_ctf_stream_put(stream);

        free(cs);
        return NULL;
}

static void ctf_stream__delete(struct ctf_stream *cs)
{
        if (cs) {
                bt_ctf_stream_put(cs->stream);
                free(cs);
        }
}

static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
{
        struct ctf_stream *cs = cw->stream[cpu];

        if (!cs) {
                cs = ctf_stream__create(cw, cpu);
                cw->stream[cpu] = cs;
        }

        return cs;
}
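
/*
 * Pick the stream index for a sample: its recorded CPU if available
 * and within range, otherwise CPU 0.
 */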
static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
                          struct perf_evsel *evsel)
{
        int cpu = 0;

        if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
                cpu = sample->cpu;

        if (cpu >= cw->stream_cnt) {
                pr_err("Event was recorded for CPU %d, limit is at %d.\n",
                       cpu, cw->stream_cnt);
                cpu = 0;
        }

        return cpu;
}

#define STREAM_FLUSH_COUNT 100000

/*
 * Currently we have no way to determine the time for the stream flush
 * other than keeping track of the number of events and checking it
 * against a threshold.
 */
static bool is_flush_needed(struct ctf_stream *cs)
{
        return cs->count >= STREAM_FLUSH_COUNT;
}
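
/*
 * Main sample callback: translate one perf sample into a CTF event and
 * append it to the per-CPU stream, flushing the stream periodically.
 */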
static int process_sample_event(struct perf_tool *tool,
                                union perf_event *_event,
                                struct perf_sample *sample,
                                struct perf_evsel *evsel,
                                struct machine *machine __maybe_unused)
{
        struct convert *c = container_of(tool, struct convert, tool);
        struct evsel_priv *priv = evsel->priv;
        struct ctf_writer *cw = &c->writer;
        struct ctf_stream *cs;
        struct bt_ctf_event_class *event_class;
        struct bt_ctf_event *event;
        int ret;

        if (WARN_ONCE(!priv, "Failed to setup all events.\n"))
                return 0;

        event_class = priv->event_class;

        /* update stats */
        c->events_count++;
        c->events_size += _event->header.size;

        pr_time2(sample->time, "sample %" PRIu64 "\n", c->events_count);

        event = bt_ctf_event_create(event_class);
        if (!event) {
                pr_err("Failed to create a CTF event\n");
                return -1;
        }

        bt_ctf_clock_set_time(cw->clock, sample->time);

        ret = add_generic_values(cw, event, evsel, sample);
        if (ret)
                return -1;

        if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
                ret = add_tracepoint_values(cw, event_class, event,
                                            evsel, sample);
                if (ret)
                        return -1;
        }

        if (perf_evsel__is_bpf_output(evsel)) {
                ret = add_bpf_output_values(event_class, event, sample);
                if (ret)
                        return -1;
        }

        cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
        if (cs) {
                if (is_flush_needed(cs))
                        ctf_stream__flush(cs);

                cs->count++;
                bt_ctf_stream_append_event(cs->stream, event);
        }

        bt_ctf_event_put(event);
        return cs ? 0 : -1;
}

#define __NON_SAMPLE_SET_FIELD(_name, _type, _field)                    \
do {                                                                    \
        ret = value_set_##_type(cw, event, #_field, _event->_name._field);\
        if (ret)                                                        \
                return -1;                                              \
} while(0)
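
/*
 * Generate a process_<name>_event() callback that converts a non-sample
 * event (comm/fork/exit) into a CTF event on stream 0 and then hands it
 * to the default perf handler.
 */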
#define __FUNC_PROCESS_NON_SAMPLE(_name, body)                          \
static int process_##_name##_event(struct perf_tool *tool,             \
                                   union perf_event *_event,           \
                                   struct perf_sample *sample,         \
                                   struct machine *machine)            \
{                                                                      \
        struct convert *c = container_of(tool, struct convert, tool);  \
        struct ctf_writer *cw = &c->writer;                            \
        struct bt_ctf_event_class *event_class = cw->_name##_class;    \
        struct bt_ctf_event *event;                                    \
        struct ctf_stream *cs;                                         \
        int ret;                                                       \
                                                                       \
        c->non_sample_count++;                                         \
        c->events_size += _event->header.size;                         \
        event = bt_ctf_event_create(event_class);                      \
        if (!event) {                                                  \
                pr_err("Failed to create a CTF event\n");              \
                return -1;                                             \
        }                                                              \
                                                                       \
        bt_ctf_clock_set_time(cw->clock, sample->time);                \
        body                                                           \
        cs = ctf_stream(cw, 0);                                        \
        if (cs) {                                                      \
                if (is_flush_needed(cs))                               \
                        ctf_stream__flush(cs);                         \
                                                                       \
                cs->count++;                                           \
                bt_ctf_stream_append_event(cs->stream, event);         \
        }                                                              \
        bt_ctf_event_put(event);                                       \
                                                                       \
        return perf_event__process_##_name(tool, _event, sample, machine);\
}

__FUNC_PROCESS_NON_SAMPLE(comm,
        __NON_SAMPLE_SET_FIELD(comm, u32, pid);
        __NON_SAMPLE_SET_FIELD(comm, u32, tid);
        __NON_SAMPLE_SET_FIELD(comm, string, comm);
)

__FUNC_PROCESS_NON_SAMPLE(fork,
        __NON_SAMPLE_SET_FIELD(fork, u32, pid);
        __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
        __NON_SAMPLE_SET_FIELD(fork, u32, tid);
        __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
        __NON_SAMPLE_SET_FIELD(fork, u64, time);
)

__FUNC_PROCESS_NON_SAMPLE(exit,
        __NON_SAMPLE_SET_FIELD(fork, u32, pid);
        __NON_SAMPLE_SET_FIELD(fork, u32, ppid);
        __NON_SAMPLE_SET_FIELD(fork, u32, tid);
        __NON_SAMPLE_SET_FIELD(fork, u32, ptid);
        __NON_SAMPLE_SET_FIELD(fork, u64, time);
)

#undef __NON_SAMPLE_SET_FIELD
#undef __FUNC_PROCESS_NON_SAMPLE

/* If dup < 0, add a prefix. Else, add _dupl_X suffix. */
static char *change_name(char *name, char *orig_name, int dup)
{
        char *new_name = NULL;
        size_t len;

        if (!name)
                name = orig_name;

        if (dup >= 10)
                goto out;
        /*
         * Add '_' prefix to potential keyword. According to
         * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
         * further CTF spec updating may require us to use '$'.
         */
        if (dup < 0)
                len = strlen(name) + sizeof("_");
        else
                len = strlen(orig_name) + sizeof("_dupl_X");

        new_name = malloc(len);
        if (!new_name)
                goto out;

        if (dup < 0)
                snprintf(new_name, len, "_%s", name);
        else
                snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);

out:
        if (name != orig_name)
                free(name);
        return new_name;
}
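
/*
 * Add a tracepoint field to the event class, prefixing names that are
 * not valid CTF identifiers and renaming duplicates.
 */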
static int event_class_add_field(struct bt_ctf_event_class *event_class,
                                 struct bt_ctf_field_type *type,
                                 struct format_field *field)
{
        struct bt_ctf_field_type *t = NULL;
        char *name;
        int dup = 1;
        int ret;

        /* alias was already assigned */
        if (field->alias != field->name)
                return bt_ctf_event_class_add_field(event_class, type,
                                (char *)field->alias);

        name = field->name;

        /* If 'name' is a keyword, add a prefix. */
        if (bt_ctf_validate_identifier(name))
                name = change_name(name, field->name, -1);

        if (!name) {
                pr_err("Failed to fix invalid identifier.");
                return -1;
        }
        while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
                bt_ctf_field_type_put(t);
                name = change_name(name, field->name, dup++);
                if (!name) {
                        pr_err("Failed to create dup name for '%s'\n", field->name);
                        return -1;
                }
        }

        ret = bt_ctf_event_class_add_field(event_class, type, name);
        if (!ret)
                field->alias = name;

        return ret;
}

static int add_tracepoint_fields_types(struct ctf_writer *cw,
                                       struct format_field *fields,
                                       struct bt_ctf_event_class *event_class)
{
        struct format_field *field;
        int ret;

        for (field = fields; field; field = field->next) {
                struct bt_ctf_field_type *type;
                unsigned long flags = field->flags;

                pr2("  field '%s'\n", field->name);

                type = get_tracepoint_field_type(cw, field);
                if (!type)
                        return -1;

                /*
                 * A string is an array of chars. For this we use the string
                 * type and don't care that it is an array. What we don't
                 * support is an array of strings.
                 */
                if (flags & FIELD_IS_STRING)
                        flags &= ~FIELD_IS_ARRAY;

                if (flags & FIELD_IS_ARRAY)
                        type = bt_ctf_field_type_array_create(type, field->arraylen);

                ret = event_class_add_field(event_class, type, field);

                if (flags & FIELD_IS_ARRAY)
                        bt_ctf_field_type_put(type);

                if (ret) {
                        pr_err("Failed to add field '%s': %d\n",
                               field->name, ret);
                        return -1;
                }
        }

        return 0;
}
static int add_tracepoint_types(struct ctf_writer *cw,
                                struct perf_evsel *evsel,
                                struct bt_ctf_event_class *class)
{
        struct format_field *common_fields = evsel->tp_format->format.common_fields;
        struct format_field *fields = evsel->tp_format->format.fields;
        int ret;

        ret = add_tracepoint_fields_types(cw, common_fields, class);
        if (!ret)
                ret = add_tracepoint_fields_types(cw, fields, class);

        return ret;
}

static int add_bpf_output_types(struct ctf_writer *cw,
                                struct bt_ctf_event_class *class)
{
        struct bt_ctf_field_type *len_type = cw->data.u32;
        struct bt_ctf_field_type *seq_base_type = cw->data.u32_hex;
        struct bt_ctf_field_type *seq_type;
        int ret;

        ret = bt_ctf_event_class_add_field(class, len_type, "raw_len");
        if (ret)
                return ret;

        seq_type = bt_ctf_field_type_sequence_create(seq_base_type, "raw_len");
        if (!seq_type)
                return -1;

        return bt_ctf_event_class_add_field(class, seq_type, "raw_data");
}

static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
                             struct bt_ctf_event_class *event_class)
{
        u64 type = evsel->attr.sample_type;

        /*
         * missing:
         *   PERF_SAMPLE_TIME         - not needed as we have it in
         *                              ctf event header
         *   PERF_SAMPLE_READ         - TODO
         *   PERF_SAMPLE_CALLCHAIN    - TODO
         *   PERF_SAMPLE_RAW          - tracepoint fields and BPF output
         *                              are handled separately
         *   PERF_SAMPLE_BRANCH_STACK - TODO
         *   PERF_SAMPLE_REGS_USER    - TODO
         *   PERF_SAMPLE_STACK_USER   - TODO
         */

#define ADD_FIELD(cl, t, n)                                             \
        do {                                                            \
                pr2("  field '%s'\n", n);                               \
                if (bt_ctf_event_class_add_field(cl, t, n)) {           \
                        pr_err("Failed to add field '%s';\n", n);       \
                        return -1;                                      \
                }                                                       \
        } while (0)

        if (type & PERF_SAMPLE_IP)
                ADD_FIELD(event_class, cw->data.u64_hex, "perf_ip");

        if (type & PERF_SAMPLE_TID) {
                ADD_FIELD(event_class, cw->data.s32, "perf_tid");
                ADD_FIELD(event_class, cw->data.s32, "perf_pid");
        }

        if ((type & PERF_SAMPLE_ID) ||
            (type & PERF_SAMPLE_IDENTIFIER))
                ADD_FIELD(event_class, cw->data.u64, "perf_id");

        if (type & PERF_SAMPLE_STREAM_ID)
                ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");

        if (type & PERF_SAMPLE_PERIOD)
                ADD_FIELD(event_class, cw->data.u64, "perf_period");

        if (type & PERF_SAMPLE_WEIGHT)
                ADD_FIELD(event_class, cw->data.u64, "perf_weight");

        if (type & PERF_SAMPLE_DATA_SRC)
                ADD_FIELD(event_class, cw->data.u64, "perf_data_src");

        if (type & PERF_SAMPLE_TRANSACTION)
                ADD_FIELD(event_class, cw->data.u64, "perf_transaction");

#undef ADD_FIELD
        return 0;
}
static int add_event(struct ctf_writer *cw, struct perf_evsel *evsel)
{
        struct bt_ctf_event_class *event_class;
        struct evsel_priv *priv;
        const char *name = perf_evsel__name(evsel);
        int ret;

        pr("Adding event '%s' (type %d)\n", name, evsel->attr.type);

        event_class = bt_ctf_event_class_create(name);
        if (!event_class)
                return -1;

        ret = add_generic_types(cw, evsel, event_class);
        if (ret)
                goto err;

        if (evsel->attr.type == PERF_TYPE_TRACEPOINT) {
                ret = add_tracepoint_types(cw, evsel, event_class);
                if (ret)
                        goto err;
        }

        if (perf_evsel__is_bpf_output(evsel)) {
                ret = add_bpf_output_types(cw, event_class);
                if (ret)
                        goto err;
        }

        ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);
        if (ret) {
                pr("Failed to add event class into stream.\n");
                goto err;
        }

        priv = malloc(sizeof(*priv));
        if (!priv)
                goto err;

        priv->event_class = event_class;
        evsel->priv = priv;
        return 0;

err:
        bt_ctf_event_class_put(event_class);
        pr_err("Failed to add event '%s'.\n", name);
        return -1;
}

static int setup_events(struct ctf_writer *cw, struct perf_session *session)
{
        struct perf_evlist *evlist = session->evlist;
        struct perf_evsel *evsel;
        int ret;

        evlist__for_each_entry(evlist, evsel) {
                ret = add_event(cw, evsel);
                if (ret)
                        return ret;
        }
        return 0;
}
#define __NON_SAMPLE_ADD_FIELD(t, n)                                    \
        do {                                                            \
                pr2("  field '%s'\n", #n);                              \
                if (bt_ctf_event_class_add_field(event_class, cw->data.t, #n)) {\
                        pr_err("Failed to add field '%s';\n", #n);      \
                        return -1;                                      \
                }                                                       \
        } while(0)

#define __FUNC_ADD_NON_SAMPLE_EVENT_CLASS(_name, body)                  \
static int add_##_name##_event(struct ctf_writer *cw)                  \
{                                                                      \
        struct bt_ctf_event_class *event_class;                        \
        int ret;                                                       \
                                                                       \
        pr("Adding "#_name" event\n");                                 \
        event_class = bt_ctf_event_class_create("perf_" #_name);       \
        if (!event_class)                                              \
                return -1;                                             \
        body                                                           \
                                                                       \
        ret = bt_ctf_stream_class_add_event_class(cw->stream_class, event_class);\
        if (ret) {                                                     \
                pr("Failed to add event class '"#_name"' into stream.\n");\
                return ret;                                            \
        }                                                              \
                                                                       \
        cw->_name##_class = event_class;                               \
        bt_ctf_event_class_put(event_class);                           \
        return 0;                                                      \
}

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(comm,
        __NON_SAMPLE_ADD_FIELD(u32, pid);
        __NON_SAMPLE_ADD_FIELD(u32, tid);
        __NON_SAMPLE_ADD_FIELD(string, comm);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(fork,
        __NON_SAMPLE_ADD_FIELD(u32, pid);
        __NON_SAMPLE_ADD_FIELD(u32, ppid);
        __NON_SAMPLE_ADD_FIELD(u32, tid);
        __NON_SAMPLE_ADD_FIELD(u32, ptid);
        __NON_SAMPLE_ADD_FIELD(u64, time);
)

__FUNC_ADD_NON_SAMPLE_EVENT_CLASS(exit,
        __NON_SAMPLE_ADD_FIELD(u32, pid);
        __NON_SAMPLE_ADD_FIELD(u32, ppid);
        __NON_SAMPLE_ADD_FIELD(u32, tid);
        __NON_SAMPLE_ADD_FIELD(u32, ptid);
        __NON_SAMPLE_ADD_FIELD(u64, time);
)

#undef __NON_SAMPLE_ADD_FIELD
#undef __FUNC_ADD_NON_SAMPLE_EVENT_CLASS

static int setup_non_sample_events(struct ctf_writer *cw,
                                   struct perf_session *session __maybe_unused)
{
        int ret;

        ret = add_comm_event(cw);
        if (ret)
                return ret;
        ret = add_exit_event(cw);
        if (ret)
                return ret;
        ret = add_fork_event(cw);
        if (ret)
                return ret;

        return 0;
}

static void cleanup_events(struct perf_session *session)
{
        struct perf_evlist *evlist = session->evlist;
        struct perf_evsel *evsel;

        evlist__for_each_entry(evlist, evsel) {
                struct evsel_priv *priv;

                priv = evsel->priv;
                bt_ctf_event_class_put(priv->event_class);
                zfree(&evsel->priv);
        }

        perf_evlist__delete(evlist);
        session->evlist = NULL;
}
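
/*
 * Allocate the per-CPU stream pointer array; the streams themselves are
 * created lazily by ctf_stream() when the first event for a CPU arrives.
 */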
static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
{
        struct ctf_stream **stream;
        struct perf_header *ph = &session->header;
        int ncpus;

        /*
         * Try to get the number of cpus used in the data file;
         * if not present, fall back to MAX_CPUS.
         */
        ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;

        stream = zalloc(sizeof(*stream) * ncpus);
        if (!stream) {
                pr_err("Failed to allocate streams.\n");
                return -ENOMEM;
        }

        cw->stream = stream;
        cw->stream_cnt = ncpus;
        return 0;
}

static void free_streams(struct ctf_writer *cw)
{
        int cpu;

        for (cpu = 0; cpu < cw->stream_cnt; cpu++)
                ctf_stream__delete(cw->stream[cpu]);

        free(cw->stream);
}

static int ctf_writer__setup_env(struct ctf_writer *cw,
                                 struct perf_session *session)
{
        struct perf_header *header = &session->header;
        struct bt_ctf_writer *writer = cw->writer;

#define ADD(__n, __v)                                                   \
do {                                                                    \
        if (bt_ctf_writer_add_environment_field(writer, __n, __v))     \
                return -1;                                              \
} while (0)

        ADD("host",    header->env.hostname);
        ADD("sysname", "Linux");
        ADD("release", header->env.os_release);
        ADD("version", header->env.version);
        ADD("machine", header->env.arch);
        ADD("domain", "kernel");
        ADD("tracer_name", "perf");

#undef ADD
        return 0;
}
static int ctf_writer__setup_clock(struct ctf_writer *cw)
{
        struct bt_ctf_clock *clock = cw->clock;

        bt_ctf_clock_set_description(clock, "perf clock");

#define SET(__n, __v)                           \
do {                                            \
        if (bt_ctf_clock_set_##__n(clock, __v)) \
                return -1;                      \
} while (0)

        SET(frequency,   1000000000);
        SET(offset_s,    0);
        SET(offset,      0);
        SET(precision,   10);
        SET(is_absolute, 0);

#undef SET
        return 0;
}

static struct bt_ctf_field_type *create_int_type(int size, bool sign, bool hex)
{
        struct bt_ctf_field_type *type;

        type = bt_ctf_field_type_integer_create(size);
        if (!type)
                return NULL;

        if (sign &&
            bt_ctf_field_type_integer_set_signed(type, 1))
                goto err;

        if (hex &&
            bt_ctf_field_type_integer_set_base(type, BT_CTF_INTEGER_BASE_HEXADECIMAL))
                goto err;

#if __BYTE_ORDER == __BIG_ENDIAN
        bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_BIG_ENDIAN);
#else
        bt_ctf_field_type_set_byte_order(type, BT_CTF_BYTE_ORDER_LITTLE_ENDIAN);
#endif

        pr2("Created type: INTEGER %d-bit %ssigned %s\n",
            size, sign ? "" : "un", hex ? "hex" : "");
        return type;

err:
        bt_ctf_field_type_put(type);
        return NULL;
}
static void ctf_writer__cleanup_data(struct ctf_writer *cw)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(cw->data.array); i++)
                bt_ctf_field_type_put(cw->data.array[i]);
}

static int ctf_writer__init_data(struct ctf_writer *cw)
{
#define CREATE_INT_TYPE(type, size, sign, hex)          \
do {                                                    \
        (type) = create_int_type(size, sign, hex);      \
        if (!(type))                                    \
                goto err;                               \
} while (0)

        CREATE_INT_TYPE(cw->data.s64, 64, true,  false);
        CREATE_INT_TYPE(cw->data.u64, 64, false, false);
        CREATE_INT_TYPE(cw->data.s32, 32, true,  false);
        CREATE_INT_TYPE(cw->data.u32, 32, false, false);
        CREATE_INT_TYPE(cw->data.u32_hex, 32, false, true);
        CREATE_INT_TYPE(cw->data.u64_hex, 64, false, true);

        cw->data.string = bt_ctf_field_type_string_create();
        if (cw->data.string)
                return 0;

err:
        ctf_writer__cleanup_data(cw);
        pr_err("Failed to create data types.\n");
        return -1;
}

static void ctf_writer__cleanup(struct ctf_writer *cw)
{
        ctf_writer__cleanup_data(cw);

        bt_ctf_clock_put(cw->clock);
        free_streams(cw);
        bt_ctf_stream_class_put(cw->stream_class);
        bt_ctf_writer_put(cw->writer);

        /* and NULL all the pointers */
        memset(cw, 0, sizeof(*cw));
}
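
/*
 * Set up the CTF writer: output directory, clock, stream class with a
 * cpu_id packet context field, and the shared integer/string types.
 */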
static int ctf_writer__init(struct ctf_writer *cw, const char *path)
{
        struct bt_ctf_writer *writer;
        struct bt_ctf_stream_class *stream_class;
        struct bt_ctf_clock *clock;
        struct bt_ctf_field_type *pkt_ctx_type;
        int ret;

        /* CTF writer */
        writer = bt_ctf_writer_create(path);
        if (!writer)
                goto err;

        cw->writer = writer;

        /* CTF clock */
        clock = bt_ctf_clock_create("perf_clock");
        if (!clock) {
                pr("Failed to create CTF clock.\n");
                goto err_cleanup;
        }

        cw->clock = clock;

        if (ctf_writer__setup_clock(cw)) {
                pr("Failed to setup CTF clock.\n");
                goto err_cleanup;
        }

        /* CTF stream class */
        stream_class = bt_ctf_stream_class_create("perf_stream");
        if (!stream_class) {
                pr("Failed to create CTF stream class.\n");
                goto err_cleanup;
        }

        cw->stream_class = stream_class;

        /* CTF clock stream setup */
        if (bt_ctf_stream_class_set_clock(stream_class, clock)) {
                pr("Failed to assign CTF clock to stream class.\n");
                goto err_cleanup;
        }

        if (ctf_writer__init_data(cw))
                goto err_cleanup;

        /* Add cpu_id for packet context */
        pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
        if (!pkt_ctx_type)
                goto err_cleanup;

        ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
        bt_ctf_field_type_put(pkt_ctx_type);
        if (ret)
                goto err_cleanup;

        /* CTF clock writer setup */
        if (bt_ctf_writer_add_clock(writer, clock)) {
                pr("Failed to assign CTF clock to writer.\n");
                goto err_cleanup;
        }

        return 0;

err_cleanup:
        ctf_writer__cleanup(cw);
err:
        pr_err("Failed to setup CTF writer.\n");
        return -1;
}

static int ctf_writer__flush_streams(struct ctf_writer *cw)
{
        int cpu, ret = 0;

        for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
                ret = ctf_stream__flush(cw->stream[cpu]);

        return ret;
}

static int convert__config(const char *var, const char *value, void *cb)
{
        struct convert *c = cb;

        if (!strcmp(var, "convert.queue-size")) {
                c->queue_size = perf_config_u64(var, value);
                return 0;
        }

        return 0;
}
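
/*
 * Entry point: read the perf.data file at @input and write an
 * equivalent CTF trace to @path.
 */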
int bt_convert__perf2ctf(const char *input, const char *path,
                         struct perf_data_convert_opts *opts)
{
        struct perf_session *session;
        struct perf_data_file file = {
                .path  = input,
                .mode  = PERF_DATA_MODE_READ,
                .force = opts->force,
        };
        struct convert c = {
                .tool = {
                        .sample          = process_sample_event,
                        .mmap            = perf_event__process_mmap,
                        .mmap2           = perf_event__process_mmap2,
                        .comm            = perf_event__process_comm,
                        .exit            = perf_event__process_exit,
                        .fork            = perf_event__process_fork,
                        .lost            = perf_event__process_lost,
                        .tracing_data    = perf_event__process_tracing_data,
                        .build_id        = perf_event__process_build_id,
                        .ordered_events  = true,
                        .ordering_requires_timestamps = true,
                },
        };
        struct ctf_writer *cw = &c.writer;
        int err = -1;

        if (opts->all) {
                c.tool.comm = process_comm_event;
                c.tool.exit = process_exit_event;
                c.tool.fork = process_fork_event;
        }

        perf_config(convert__config, &c);

        /* CTF writer */
        if (ctf_writer__init(cw, path))
                return -1;

        /* perf.data session */
        session = perf_session__new(&file, 0, &c.tool);
        if (!session)
                goto free_writer;

        if (c.queue_size) {
                ordered_events__set_alloc_size(&session->ordered_events,
                                               c.queue_size);
        }

        /* CTF writer env/clock setup */
        if (ctf_writer__setup_env(cw, session))
                goto free_session;

        /* CTF events setup */
        if (setup_events(cw, session))
                goto free_session;

        if (opts->all && setup_non_sample_events(cw, session))
                goto free_session;

        if (setup_streams(cw, session))
                goto free_session;

        err = perf_session__process_events(session);
        if (!err)
                err = ctf_writer__flush_streams(cw);
        else
                pr_err("Error during conversion.\n");

        fprintf(stderr,
                "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
                file.path, path);

        fprintf(stderr,
                "[ perf data convert: Converted and wrote %.3f MB (%" PRIu64 " samples",
                (double) c.events_size / 1024.0 / 1024.0,
                c.events_count);

        if (!c.non_sample_count)
                fprintf(stderr, ") ]\n");
        else
                fprintf(stderr, ", %" PRIu64 " non-samples) ]\n", c.non_sample_count);

        cleanup_events(session);
        perf_session__delete(session);
        ctf_writer__cleanup(cw);
        return err;

free_session:
        perf_session__delete(session);
free_writer:
        ctf_writer__cleanup(cw);
        pr_err("Error during conversion setup.\n");
        return err;
}