trace_events.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_TRACE_EVENT_H
#define _LINUX_TRACE_EVENT_H

#include <linux/ring_buffer.h>
#include <linux/trace_seq.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/perf_event.h>
#include <linux/tracepoint.h>

struct trace_array;
struct trace_buffer;
struct tracer;
struct dentry;
struct bpf_prog;

const char *trace_print_flags_seq(struct trace_seq *p, const char *delim,
                                  unsigned long flags,
                                  const struct trace_print_flags *flag_array);

const char *trace_print_symbols_seq(struct trace_seq *p, unsigned long val,
                                    const struct trace_print_flags *symbol_array);

#if BITS_PER_LONG == 32
const char *trace_print_flags_seq_u64(struct trace_seq *p, const char *delim,
                                      unsigned long long flags,
                                      const struct trace_print_flags_u64 *flag_array);

const char *trace_print_symbols_seq_u64(struct trace_seq *p,
                                        unsigned long long val,
                                        const struct trace_print_flags_u64 *symbol_array);
#endif

const char *trace_print_bitmask_seq(struct trace_seq *p, void *bitmask_ptr,
                                    unsigned int bitmask_size);

const char *trace_print_hex_seq(struct trace_seq *p,
                                const unsigned char *buf, int len,
                                bool concatenate);

const char *trace_print_array_seq(struct trace_seq *p,
                                  const void *buf, int count,
                                  size_t el_size);
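/*
 * These helpers are the backends of the __print_flags()/__print_symbolic()
 * style macros used in TRACE_EVENT() print formats (see the tmp_seq note in
 * struct trace_iterator below). A minimal, hypothetical sketch -- "my_flags"
 * and the entry layout are illustrative, not from this header:
 *
 *      static const struct trace_print_flags my_flags[] = {
 *              { 0x1, "READ"  },
 *              { 0x2, "WRITE" },
 *              { -1,  NULL    },       << a NULL name terminates the table
 *      };
 *
 *      ret = trace_print_flags_seq(p, "|", entry->flags, my_flags);
 */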
struct trace_iterator;
struct trace_event;

int trace_raw_output_prep(struct trace_iterator *iter,
                          struct trace_event *event);

/*
 * The trace entry - the most basic unit of tracing. This is what
 * is printed in the end as a single line in the trace output, such as:
 *
 *     bash-15816 [01]   235.197585: idle_cpu <- irq_enter
 */
struct trace_entry {
        unsigned short          type;
        unsigned char           flags;
        unsigned char           preempt_count;
        int                     pid;
};

#define TRACE_EVENT_TYPE_MAX                                            \
        ((1 << (sizeof(((struct trace_entry *)0)->type) * 8)) - 1)
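/*
 * Worked example: trace_entry.type is an unsigned short, so with a
 * 16-bit short this evaluates to (1 << 16) - 1 = 65535.
 */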
/*
 * Trace iterator - used by printout routines that present trace
 * results to users; these routines might sleep, etc.:
 */
struct trace_iterator {
        struct trace_array      *tr;
        struct tracer           *trace;
        struct trace_buffer     *trace_buffer;
        void                    *private;
        int                     cpu_file;
        struct mutex            mutex;
        struct ring_buffer_iter **buffer_iter;
        unsigned long           iter_flags;

        /* trace_seq for __print_flags() and __print_symbolic() etc. */
        struct trace_seq        tmp_seq;

        cpumask_var_t           started;

        /* true when the currently open file is a snapshot */
        bool                    snapshot;

        /* The below is zeroed out in pipe_read */
        struct trace_seq        seq;
        struct trace_entry      *ent;
        unsigned long           lost_events;
        int                     leftover;
        int                     ent_size;
        int                     cpu;
        u64                     ts;

        loff_t                  pos;
        long                    idx;

        /* All new fields here will be zeroed out in pipe_read */
};

enum trace_iter_flags {
        TRACE_FILE_LAT_FMT      = 1,
        TRACE_FILE_ANNOTATE     = 2,
        TRACE_FILE_TIME_IN_NS   = 4,
};

typedef enum print_line_t (*trace_print_func)(struct trace_iterator *iter,
                                              int flags, struct trace_event *event);

struct trace_event_functions {
        trace_print_func        trace;
        trace_print_func        raw;
        trace_print_func        hex;
        trace_print_func        binary;
};

struct trace_event {
        struct hlist_node               node;
        struct list_head                list;
        int                             type;
        struct trace_event_functions    *funcs;
};

extern int register_trace_event(struct trace_event *event);
extern int unregister_trace_event(struct trace_event *event);
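/*
 * A minimal registration sketch ("my_trace_output" is a hypothetical
 * trace_print_func; the TRACE_EVENT() machinery normally does this for
 * you). register_trace_event() hands back the assigned event type, or
 * zero on failure:
 *
 *      static struct trace_event_functions my_funcs = {
 *              .trace  = my_trace_output,
 *      };
 *      static struct trace_event my_event = {
 *              .funcs  = &my_funcs,
 *      };
 *
 *      if (!register_trace_event(&my_event))
 *              return -ENODEV;
 */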
/* Return values for print_line callback */
enum print_line_t {
        TRACE_TYPE_PARTIAL_LINE = 0,    /* Retry after flushing the seq */
        TRACE_TYPE_HANDLED      = 1,
        TRACE_TYPE_UNHANDLED    = 2,    /* Relay to other output functions */
        TRACE_TYPE_NO_CONSUME   = 3     /* Handled but ask to not consume */
};

enum print_line_t trace_handle_return(struct trace_seq *s);
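/*
 * Typical use in a trace_print_func implementation (illustrative):
 * emit into iter->seq, then let trace_handle_return() map an
 * overflowed sequence to TRACE_TYPE_PARTIAL_LINE and anything else
 * to TRACE_TYPE_HANDLED:
 *
 *      trace_seq_printf(&iter->seq, "my event: %d\n", val);
 *      return trace_handle_return(&iter->seq);
 */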
void tracing_generic_entry_update(struct trace_entry *entry,
                                  unsigned long flags,
                                  int pc);

struct trace_event_file;

struct ring_buffer_event *
trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
                                struct trace_event_file *trace_file,
                                int type, unsigned long len,
                                unsigned long flags, int pc);

#define TRACE_RECORD_CMDLINE    BIT(0)
#define TRACE_RECORD_TGID       BIT(1)

void tracing_record_taskinfo(struct task_struct *task, int flags);
void tracing_record_taskinfo_sched_switch(struct task_struct *prev,
                                          struct task_struct *next, int flags);

void tracing_record_cmdline(struct task_struct *task);
void tracing_record_tgid(struct task_struct *task);

int trace_output_call(struct trace_iterator *iter, char *name, char *fmt, ...);

struct event_filter;

enum trace_reg {
        TRACE_REG_REGISTER,
        TRACE_REG_UNREGISTER,
#ifdef CONFIG_PERF_EVENTS
        TRACE_REG_PERF_REGISTER,
        TRACE_REG_PERF_UNREGISTER,
        TRACE_REG_PERF_OPEN,
        TRACE_REG_PERF_CLOSE,

        /*
         * These (ADD/DEL) use a 'boolean' return value, where 1 (true) means a
         * custom action was taken and the default action is not to be
         * performed.
         */
        TRACE_REG_PERF_ADD,
        TRACE_REG_PERF_DEL,
#endif
};

struct trace_event_call;

struct trace_event_class {
        const char              *system;
        void                    *probe;
#ifdef CONFIG_PERF_EVENTS
        void                    *perf_probe;
#endif
        int                     (*reg)(struct trace_event_call *event,
                                       enum trace_reg type, void *data);
        int                     (*define_fields)(struct trace_event_call *);
        struct list_head        *(*get_fields)(struct trace_event_call *);
        struct list_head        fields;
        int                     (*raw_init)(struct trace_event_call *);
};

extern int trace_event_reg(struct trace_event_call *event,
                           enum trace_reg type, void *data);

struct trace_event_buffer {
        struct ring_buffer              *buffer;
        struct ring_buffer_event        *event;
        struct trace_event_file         *trace_file;
        void                            *entry;
        unsigned long                   flags;
        int                             pc;
};

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
                                 struct trace_event_file *trace_file,
                                 unsigned long len);

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer);
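/*
 * A minimal reserve/fill/commit sketch, in the style of generated event
 * probes ("struct my_entry" and its field are hypothetical):
 *
 *      struct trace_event_buffer fbuffer;
 *      struct my_entry *entry;
 *
 *      entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *                                         sizeof(*entry));
 *      if (!entry)
 *              return;
 *      entry->value = value;
 *      trace_event_buffer_commit(&fbuffer);
 */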
enum {
        TRACE_EVENT_FL_FILTERED_BIT,
        TRACE_EVENT_FL_CAP_ANY_BIT,
        TRACE_EVENT_FL_NO_SET_FILTER_BIT,
        TRACE_EVENT_FL_IGNORE_ENABLE_BIT,
        TRACE_EVENT_FL_TRACEPOINT_BIT,
        TRACE_EVENT_FL_KPROBE_BIT,
        TRACE_EVENT_FL_UPROBE_BIT,
};

/*
 * Event flags:
 *  FILTERED      - The event has a filter attached
 *  CAP_ANY       - Any user can enable for perf
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  IGNORE_ENABLE - For trace internal events, do not enable with debugfs file
 *  TRACEPOINT    - Event is a tracepoint
 *  KPROBE        - Event is a kprobe
 *  UPROBE        - Event is a uprobe
 */
enum {
        TRACE_EVENT_FL_FILTERED         = (1 << TRACE_EVENT_FL_FILTERED_BIT),
        TRACE_EVENT_FL_CAP_ANY          = (1 << TRACE_EVENT_FL_CAP_ANY_BIT),
        TRACE_EVENT_FL_NO_SET_FILTER    = (1 << TRACE_EVENT_FL_NO_SET_FILTER_BIT),
        TRACE_EVENT_FL_IGNORE_ENABLE    = (1 << TRACE_EVENT_FL_IGNORE_ENABLE_BIT),
        TRACE_EVENT_FL_TRACEPOINT       = (1 << TRACE_EVENT_FL_TRACEPOINT_BIT),
        TRACE_EVENT_FL_KPROBE           = (1 << TRACE_EVENT_FL_KPROBE_BIT),
        TRACE_EVENT_FL_UPROBE           = (1 << TRACE_EVENT_FL_UPROBE_BIT),
};

#define TRACE_EVENT_FL_UKPROBE (TRACE_EVENT_FL_KPROBE | TRACE_EVENT_FL_UPROBE)

struct trace_event_call {
        struct list_head        list;
        struct trace_event_class *class;
        union {
                char                    *name;
                /* Set TRACE_EVENT_FL_TRACEPOINT flag when using "tp" */
                struct tracepoint       *tp;
        };
        struct trace_event      event;
        char                    *print_fmt;
        struct event_filter     *filter;
        void                    *mod;
        void                    *data;
        /*
         * Static flags of different events, as defined by the
         * TRACE_EVENT_FL_* bits above:
         *   bit 0: filter_active
         *   bit 1: allow trace by non root (cap any)
         *   bit 2: failed to apply filter
         *   bit 3: trace internal event (do not enable)
         *   bit 4: Event is a tracepoint
         *   bit 5: Event is a kprobe
         *   bit 6: Event is a uprobe
         */
        int                     flags;

#ifdef CONFIG_PERF_EVENTS
        int                             perf_refcount;
        struct hlist_head __percpu      *perf_events;
        struct bpf_prog_array __rcu     *prog_array;

        int     (*perf_perm)(struct trace_event_call *,
                             struct perf_event *);
#endif
};

#ifdef CONFIG_PERF_EVENTS
static inline bool bpf_prog_array_valid(struct trace_event_call *call)
{
        /*
         * This inline function checks whether call->prog_array
         * is valid or not. The function is called in various places,
         * outside rcu_read_lock/unlock, as a heuristic to speed up execution.
         *
         * If this function returns true, and later call->prog_array
         * becomes NULL inside the rcu_read_lock/unlock region,
         * we bail out then. If this function returns false,
         * there is a risk that we might miss a few events if the checking
         * were delayed until inside the rcu_read_lock/unlock region and
         * call->prog_array happened to become non-NULL then.
         *
         * Here, READ_ONCE() is used instead of rcu_access_pointer().
         * rcu_access_pointer() requires the actual definition of
         * "struct bpf_prog_array" while READ_ONCE() only needs
         * a declaration of the same type.
         */
        return !!READ_ONCE(call->prog_array);
}
#endif
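/*
 * Usage sketch of the heuristic described above (illustrative):
 *
 *      if (bpf_prog_array_valid(call)) {
 *              ...
 *              ret = trace_call_bpf(call, ctx);   << declared below
 *              ...
 *      }
 */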
static inline const char *
trace_event_name(struct trace_event_call *call)
{
        if (call->flags & TRACE_EVENT_FL_TRACEPOINT)
                return call->tp ? call->tp->name : NULL;
        else
                return call->name;
}
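/*
 * Use this accessor rather than reading the union directly: which
 * member is live depends on TRACE_EVENT_FL_TRACEPOINT. Illustrative:
 *
 *      pr_debug("event: %s\n", trace_event_name(call));
 */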
struct trace_array;
struct trace_subsystem_dir;

enum {
        EVENT_FILE_FL_ENABLED_BIT,
        EVENT_FILE_FL_RECORDED_CMD_BIT,
        EVENT_FILE_FL_RECORDED_TGID_BIT,
        EVENT_FILE_FL_FILTERED_BIT,
        EVENT_FILE_FL_NO_SET_FILTER_BIT,
        EVENT_FILE_FL_SOFT_MODE_BIT,
        EVENT_FILE_FL_SOFT_DISABLED_BIT,
        EVENT_FILE_FL_TRIGGER_MODE_BIT,
        EVENT_FILE_FL_TRIGGER_COND_BIT,
        EVENT_FILE_FL_PID_FILTER_BIT,
        EVENT_FILE_FL_WAS_ENABLED_BIT,
};

/*
 * Event file flags:
 *  ENABLED       - The event is enabled
 *  RECORDED_CMD  - The comms should be recorded at sched_switch
 *  RECORDED_TGID - The tgids should be recorded at sched_switch
 *  FILTERED      - The event has a filter attached
 *  NO_SET_FILTER - Set when filter has error and is to be ignored
 *  SOFT_MODE     - The event is enabled/disabled by SOFT_DISABLED
 *  SOFT_DISABLED - When set, do not trace the event (even though its
 *                  tracepoint may be enabled)
 *  TRIGGER_MODE  - When set, invoke the triggers associated with the event
 *  TRIGGER_COND  - When set, one or more triggers have an associated filter
 *  PID_FILTER    - When set, the event is filtered based on pid
 *  WAS_ENABLED   - Set when enabled to know to clear trace on module removal
 */
enum {
        EVENT_FILE_FL_ENABLED           = (1 << EVENT_FILE_FL_ENABLED_BIT),
        EVENT_FILE_FL_RECORDED_CMD      = (1 << EVENT_FILE_FL_RECORDED_CMD_BIT),
        EVENT_FILE_FL_RECORDED_TGID     = (1 << EVENT_FILE_FL_RECORDED_TGID_BIT),
        EVENT_FILE_FL_FILTERED          = (1 << EVENT_FILE_FL_FILTERED_BIT),
        EVENT_FILE_FL_NO_SET_FILTER     = (1 << EVENT_FILE_FL_NO_SET_FILTER_BIT),
        EVENT_FILE_FL_SOFT_MODE         = (1 << EVENT_FILE_FL_SOFT_MODE_BIT),
        EVENT_FILE_FL_SOFT_DISABLED     = (1 << EVENT_FILE_FL_SOFT_DISABLED_BIT),
        EVENT_FILE_FL_TRIGGER_MODE      = (1 << EVENT_FILE_FL_TRIGGER_MODE_BIT),
        EVENT_FILE_FL_TRIGGER_COND      = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
        EVENT_FILE_FL_PID_FILTER        = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
        EVENT_FILE_FL_WAS_ENABLED       = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
};

struct trace_event_file {
        struct list_head                list;
        struct trace_event_call         *event_call;
        struct event_filter __rcu       *filter;
        struct dentry                   *dir;
        struct trace_array              *tr;
        struct trace_subsystem_dir      *system;
        struct list_head                triggers;

        /*
         * 32 bit flags; the bit meanings are given by the
         * EVENT_FILE_FL_* enums above.
         *
         * Note: The bits must be set atomically to prevent races
         * from other writers. Reads of flags do not need to be in
         * sync as they occur in critical sections. But the way flags
         * is currently used, these changes do not affect the code
         * except that when a change is made, it may have a slight
         * delay in propagating the changes to other CPUs due to
         * caching and such. Which is mostly OK ;-)
         */
        unsigned long           flags;
        atomic_t                sm_ref; /* soft-mode reference counter */
        atomic_t                tm_ref; /* trigger-mode reference counter */
};

#define __TRACE_EVENT_FLAGS(name, value)                                \
        static int __init trace_init_flags_##name(void)                 \
        {                                                               \
                event_##name.flags |= value;                            \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_flags_##name);

#define __TRACE_EVENT_PERF_PERM(name, expr...)                          \
        static int perf_perm_##name(struct trace_event_call *tp_event,  \
                                    struct perf_event *p_event)         \
        {                                                               \
                return ({ expr; });                                     \
        }                                                               \
        static int __init trace_init_perf_perm_##name(void)             \
        {                                                               \
                event_##name.perf_perm = &perf_perm_##name;             \
                return 0;                                               \
        }                                                               \
        early_initcall(trace_init_perf_perm_##name);
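/*
 * Illustrative use of the two macros above, for a hypothetical
 * "my_event" (TRACE_EVENT() normally defines event_my_event):
 *
 *      __TRACE_EVENT_FLAGS(my_event, TRACE_EVENT_FL_CAP_ANY);
 *      __TRACE_EVENT_PERF_PERM(my_event,
 *              is_sampling_event(p_event) ? -EPERM : 0);
 */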
#define PERF_MAX_TRACE_SIZE     2048

#define MAX_FILTER_STR_VAL      256     /* Should handle KSYM_SYMBOL_LEN */

enum event_trigger_type {
        ETT_NONE                = (0),
        ETT_TRACE_ONOFF         = (1 << 0),
        ETT_SNAPSHOT            = (1 << 1),
        ETT_STACKTRACE          = (1 << 2),
        ETT_EVENT_ENABLE        = (1 << 3),
        ETT_EVENT_HIST          = (1 << 4),
        ETT_HIST_ENABLE         = (1 << 5),
};

extern int filter_match_preds(struct event_filter *filter, void *rec);

extern enum event_trigger_type
event_triggers_call(struct trace_event_file *file, void *rec,
                    struct ring_buffer_event *event);

extern void
event_triggers_post_call(struct trace_event_file *file,
                         enum event_trigger_type tt);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file);

/**
 * trace_trigger_soft_disabled - do triggers and test if soft disabled
 * @file: The file pointer of the event to test
 *
 * If any triggers without filters are attached to this event, they
 * will be called here. If the event is soft disabled and has no
 * triggers that require testing the fields, it will return true,
 * otherwise false.
 */
static inline bool
trace_trigger_soft_disabled(struct trace_event_file *file)
{
        unsigned long eflags = file->flags;

        if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
                if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
                        event_triggers_call(file, NULL, NULL);
                if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
                        return true;
                if (eflags & EVENT_FILE_FL_PID_FILTER)
                        return trace_event_ignore_this_pid(file);
        }
        return false;
}
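/*
 * Typical call-site sketch, at the top of an event probe (illustrative):
 *
 *      if (trace_trigger_soft_disabled(trace_file))
 *              return;
 *      ... reserve and fill the event ...
 */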
#ifdef CONFIG_BPF_EVENTS
unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx);
int perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog);
void perf_event_detach_bpf_prog(struct perf_event *event);
int perf_event_query_prog_array(struct perf_event *event, void __user *info);
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog);
struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
                            u32 *fd_type, const char **buf,
                            u64 *probe_offset, u64 *probe_addr);
#else
static inline unsigned int trace_call_bpf(struct trace_event_call *call, void *ctx)
{
        return 1;
}

static inline int
perf_event_attach_bpf_prog(struct perf_event *event, struct bpf_prog *prog)
{
        return -EOPNOTSUPP;
}

static inline void perf_event_detach_bpf_prog(struct perf_event *event) { }

static inline int
perf_event_query_prog_array(struct perf_event *event, void __user *info)
{
        return -EOPNOTSUPP;
}

static inline int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
        return -EOPNOTSUPP;
}

static inline int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *p)
{
        return -EOPNOTSUPP;
}

static inline struct bpf_raw_event_map *bpf_find_raw_tracepoint(const char *name)
{
        return NULL;
}

static inline int bpf_get_perf_event_info(const struct perf_event *event,
                                          u32 *prog_id, u32 *fd_type,
                                          const char **buf, u64 *probe_offset,
                                          u64 *probe_addr)
{
        return -EOPNOTSUPP;
}
#endif

enum {
        FILTER_OTHER = 0,
        FILTER_STATIC_STRING,
        FILTER_DYN_STRING,
        FILTER_PTR_STRING,
        FILTER_TRACE_FN,
        FILTER_COMM,
        FILTER_CPU,
};

extern int trace_event_raw_init(struct trace_event_call *call);
extern int trace_define_field(struct trace_event_call *call, const char *type,
                              const char *name, int offset, int size,
                              int is_signed, int filter_type);
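/*
 * Sketch of a define_fields callback built on trace_define_field()
 * ("struct my_entry" is hypothetical; FILTER_OTHER is the default
 * filter type from the enum above, and is_signed_type() is defined
 * just below):
 *
 *      ret = trace_define_field(call, "pid_t", "pid",
 *                               offsetof(struct my_entry, pid),
 *                               sizeof(pid_t),
 *                               is_signed_type(pid_t), FILTER_OTHER);
 */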
extern int trace_add_event_call_nolock(struct trace_event_call *call);
extern int trace_remove_event_call_nolock(struct trace_event_call *call);
extern int trace_add_event_call(struct trace_event_call *call);
extern int trace_remove_event_call(struct trace_event_call *call);
extern int trace_event_get_offsets(struct trace_event_call *call);

#define is_signed_type(type)    (((type)(-1)) < (type)1)

int trace_set_clr_event(const char *system, const char *event, int set);
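/*
 * Worked examples (illustrative):
 *
 *      is_signed_type(int)  -> ((int)-1 < (int)1)  -> true
 *      is_signed_type(u32)  -> ((u32)-1 < (u32)1)  -> false
 *
 * Enable every event in the "sched" system, then disable one of them:
 *
 *      trace_set_clr_event("sched", NULL, 1);
 *      trace_set_clr_event("sched", "sched_switch", 0);
 */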
/*
 * The double __builtin_constant_p check is there because gcc would
 * otherwise refuse to initialize the static variable from fmt when fmt
 * is not a constant, even though the outer if statement optimizes the
 * branch away.
 */
#define event_trace_printk(ip, fmt, args...)                            \
do {                                                                    \
        __trace_printk_check_format(fmt, ##args);                       \
        tracing_record_cmdline(current);                                \
        if (__builtin_constant_p(fmt)) {                                \
                static const char *trace_printk_fmt                     \
                  __attribute__((section("__trace_printk_fmt"))) =      \
                        __builtin_constant_p(fmt) ? fmt : NULL;         \
                                                                        \
                __trace_bprintk(ip, trace_printk_fmt, ##args);          \
        } else                                                          \
                __trace_printk(ip, fmt, ##args);                        \
} while (0)
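/*
 * Illustrative use, logging from the current instruction pointer
 * (_THIS_IP_ comes from kernel.h):
 *
 *      event_trace_printk(_THIS_IP_, "my value: %d\n", val);
 */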
#ifdef CONFIG_PERF_EVENTS
struct perf_event;

DECLARE_PER_CPU(struct pt_regs, perf_trace_regs);
DECLARE_PER_CPU(int, bpf_kprobe_override);

extern int  perf_trace_init(struct perf_event *event);
extern void perf_trace_destroy(struct perf_event *event);
extern int  perf_trace_add(struct perf_event *event, int flags);
extern void perf_trace_del(struct perf_event *event, int flags);
#ifdef CONFIG_KPROBE_EVENTS
extern int  perf_kprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
                               u32 *fd_type, const char **symbol,
                               u64 *probe_offset, u64 *probe_addr,
                               bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
extern int  perf_uprobe_init(struct perf_event *event, bool is_retprobe);
extern void perf_uprobe_destroy(struct perf_event *event);
extern int bpf_get_uprobe_info(const struct perf_event *event,
                               u32 *fd_type, const char **filename,
                               u64 *probe_offset, bool perf_type_tracepoint);
#endif
extern int  ftrace_profile_set_filter(struct perf_event *event, int event_id,
                                      char *filter_str);
extern void ftrace_profile_free_filter(struct perf_event *event);
void perf_trace_buf_update(void *record, u16 type);
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp);

void bpf_trace_run1(struct bpf_prog *prog, u64 arg1);
void bpf_trace_run2(struct bpf_prog *prog, u64 arg1, u64 arg2);
void bpf_trace_run3(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3);
void bpf_trace_run4(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4);
void bpf_trace_run5(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5);
void bpf_trace_run6(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6);
void bpf_trace_run7(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7);
void bpf_trace_run8(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                    u64 arg8);
void bpf_trace_run9(struct bpf_prog *prog, u64 arg1, u64 arg2,
                    u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                    u64 arg8, u64 arg9);
void bpf_trace_run10(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10);
void bpf_trace_run11(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10, u64 arg11);
void bpf_trace_run12(struct bpf_prog *prog, u64 arg1, u64 arg2,
                     u64 arg3, u64 arg4, u64 arg5, u64 arg6, u64 arg7,
                     u64 arg8, u64 arg9, u64 arg10, u64 arg11, u64 arg12);
void perf_trace_run_bpf_submit(void *raw_data, int size, int rctx,
                               struct trace_event_call *call, u64 count,
                               struct pt_regs *regs, struct hlist_head *head,
                               struct task_struct *task);

static inline void
perf_trace_buf_submit(void *raw_data, int size, int rctx, u16 type,
                      u64 count, struct pt_regs *regs, void *head,
                      struct task_struct *task)
{
        perf_tp_event(type, count, raw_data, size, regs, head, rctx, task);
}
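/*
 * A minimal sketch of the perf output path built from the two buffer
 * helpers above (illustrative; mirrors the generated perf probes):
 *
 *      entry = perf_trace_buf_alloc(size, &regs, &rctx);
 *      if (!entry)
 *              return;
 *      ... fill entry ...
 *      perf_trace_buf_submit(entry, size, rctx, call->event.type,
 *                            1, regs, head, NULL);
 */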
#endif /* CONFIG_PERF_EVENTS */

#endif /* _LINUX_TRACE_EVENT_H */