/* trace_syscalls.c */
#include <trace/syscall.h>
#include <trace/events/syscalls.h>
#include <linux/syscalls.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>	/* for MODULE_NAME_LEN via KSYM_SYMBOL_LEN */
#include <linux/ftrace.h>
#include <linux/perf_event.h>
#include <asm/syscall.h>

#include "trace_output.h"
#include "trace.h"

static DEFINE_MUTEX(syscall_trace_lock);

static int syscall_enter_register(struct trace_event_call *event,
                                  enum trace_reg type, void *data);
static int syscall_exit_register(struct trace_event_call *event,
                                 enum trace_reg type, void *data);
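
/*
 * The field list for a syscall-entry event lives in its per-syscall
 * metadata (stashed in call->data), not in the shared event class.
 */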
static struct list_head *
syscall_get_enter_fields(struct trace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        return &entry->enter_fields;
}

extern struct syscall_metadata *__start_syscalls_metadata[];
extern struct syscall_metadata *__stop_syscalls_metadata[];
static struct syscall_metadata **syscalls_metadata;

#ifndef ARCH_HAS_SYSCALL_MATCH_SYM_NAME
static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
{
        /*
         * Only compare after the "sys" prefix. Archs that use
         * syscall wrappers may have syscall symbol aliases prefixed
         * with ".SyS" or ".sys" instead of "sys", leading to an
         * unwanted mismatch.
         */
        return !strcmp(sym + 3, name + 3);
}
#endif

#ifdef ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
/*
 * Some architectures that allow 32-bit applications to run on a
 * 64-bit kernel do not map the syscalls for 32-bit tasks the same
 * way they do for 64-bit tasks.
 *
 * *cough*x86*cough*
 *
 * In such a case, instead of reporting the wrong syscalls, simply
 * ignore them.
 *
 * For an arch to ignore the compat syscalls it needs to define
 * ARCH_TRACE_IGNORE_COMPAT_SYSCALLS as well as the function
 * arch_trace_is_compat_syscall(), to let the tracing system know
 * that it should ignore them.
 */
static int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
        if (unlikely(arch_trace_is_compat_syscall(regs)))
                return -1;

        return syscall_get_nr(task, regs);
}
#else
static inline int
trace_get_syscall_nr(struct task_struct *task, struct pt_regs *regs)
{
        return syscall_get_nr(task, regs);
}
#endif /* ARCH_TRACE_IGNORE_COMPAT_SYSCALLS */
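
/*
 * Resolve a syscall's entry address to its metadata record: look the
 * address up in kallsyms and scan the metadata section built by the
 * SYSCALL_METADATA()/SYSCALL_DEFINEx() macros for a matching name.
 */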
static __init struct syscall_metadata *
find_syscall_meta(unsigned long syscall)
{
        struct syscall_metadata **start;
        struct syscall_metadata **stop;
        char str[KSYM_SYMBOL_LEN];

        start = __start_syscalls_metadata;
        stop = __stop_syscalls_metadata;
        kallsyms_lookup(syscall, NULL, NULL, NULL, str);

        if (arch_syscall_match_sym_name(str, "sys_ni_syscall"))
                return NULL;

        for ( ; start < stop; start++) {
                if ((*start)->name && arch_syscall_match_sym_name(str, (*start)->name))
                        return *start;
        }
        return NULL;
}
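
/* Map a syscall number to its metadata; NULL if unknown or out of range. */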
static struct syscall_metadata *syscall_nr_to_meta(int nr)
{
        if (!syscalls_metadata || nr >= NR_syscalls || nr < 0)
                return NULL;

        return syscalls_metadata[nr];
}
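
/*
 * "enter" event output handler: prints the syscall as
 * "name(arg: value, ...)", prefixing each value with its type when the
 * verbose trace option is set.
 */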
static enum print_line_t
print_syscall_enter(struct trace_iterator *iter, int flags,
                    struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_enter *trace;
        struct syscall_metadata *entry;
        int i, syscall;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry)
                goto end;

        if (entry->enter_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                goto end;
        }

        trace_seq_printf(s, "%s(", entry->name);

        for (i = 0; i < entry->nb_args; i++) {

                if (trace_seq_has_overflowed(s))
                        goto end;

                /* parameter types */
                if (trace_flags & TRACE_ITER_VERBOSE)
                        trace_seq_printf(s, "%s ", entry->types[i]);

                /* parameter values */
                trace_seq_printf(s, "%s: %lx%s", entry->args[i],
                                 trace->args[i],
                                 i == entry->nb_args - 1 ? "" : ", ");
        }

        trace_seq_putc(s, ')');
end:
        trace_seq_putc(s, '\n');

        return trace_handle_return(s);
}
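
/* "exit" event output handler: prints "name -> 0x<return value>". */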
static enum print_line_t
print_syscall_exit(struct trace_iterator *iter, int flags,
                   struct trace_event *event)
{
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;
        struct syscall_trace_exit *trace;
        int syscall;
        struct syscall_metadata *entry;

        trace = (typeof(trace))ent;
        syscall = trace->nr;
        entry = syscall_nr_to_meta(syscall);

        if (!entry) {
                trace_seq_putc(s, '\n');
                goto out;
        }

        if (entry->exit_event->event.type != ent->type) {
                WARN_ON_ONCE(1);
                return TRACE_TYPE_UNHANDLED;
        }

        trace_seq_printf(s, "%s -> 0x%lx\n", entry->name,
                         trace->ret);

 out:
        return trace_handle_return(s);
}
extern char *__bad_type_size(void);

#define SYSCALL_FIELD(type, name)                               \
        sizeof(type) != sizeof(trace.name) ?                    \
                __bad_type_size() :                             \
                #type, #name, offsetof(typeof(trace), name),    \
                sizeof(trace.name), is_signed_type(type)
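
/*
 * Build the print_fmt string for a syscall-entry event. Called twice:
 * first with a zero length to size the buffer, then again to fill it.
 */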
static int __init
__set_enter_print_fmt(struct syscall_metadata *entry, char *buf, int len)
{
        int i;
        int pos = 0;

        /* When len=0, we just calculate the needed length */
#define LEN_OR_ZERO (len ? len - pos : 0)

        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");
        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO, "%s: 0x%%0%zulx%s",
                                entry->args[i], sizeof(unsigned long),
                                i == entry->nb_args - 1 ? "" : ", ");
        }
        pos += snprintf(buf + pos, LEN_OR_ZERO, "\"");

        for (i = 0; i < entry->nb_args; i++) {
                pos += snprintf(buf + pos, LEN_OR_ZERO,
                                ", ((unsigned long)(REC->%s))", entry->args[i]);
        }

#undef LEN_OR_ZERO

        /* return the length of print_fmt */
        return pos;
}
static int __init set_syscall_print_fmt(struct trace_event_call *call)
{
        char *print_fmt;
        int len;
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event != call) {
                call->print_fmt = "\"0x%lx\", REC->ret";
                return 0;
        }

        /* First: called with 0 length to calculate the needed length */
        len = __set_enter_print_fmt(entry, NULL, 0);

        print_fmt = kmalloc(len + 1, GFP_KERNEL);
        if (!print_fmt)
                return -ENOMEM;

        /* Second: actually write the @print_fmt */
        __set_enter_print_fmt(entry, print_fmt, len + 1);
        call->print_fmt = print_fmt;

        return 0;
}

static void __init free_syscall_print_fmt(struct trace_event_call *call)
{
        struct syscall_metadata *entry = call->data;

        if (entry->enter_event == call)
                kfree(call->print_fmt);
}
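
/*
 * Describe the record layout to the event filter code: the syscall
 * number, then one unsigned long per argument (entry) or the return
 * value (exit).
 */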
static int __init syscall_enter_define_fields(struct trace_event_call *call)
{
        struct syscall_trace_enter trace;
        struct syscall_metadata *meta = call->data;
        int ret;
        int i;
        int offset = offsetof(typeof(trace), args);

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        for (i = 0; i < meta->nb_args; i++) {
                ret = trace_define_field(call, meta->types[i],
                                         meta->args[i], offset,
                                         sizeof(unsigned long), 0,
                                         FILTER_OTHER);
                offset += sizeof(unsigned long);
        }

        return ret;
}

static int __init syscall_exit_define_fields(struct trace_event_call *call)
{
        struct syscall_trace_exit trace;
        int ret;

        ret = trace_define_field(call, SYSCALL_FIELD(int, nr), FILTER_OTHER);
        if (ret)
                return ret;

        ret = trace_define_field(call, SYSCALL_FIELD(long, ret),
                                 FILTER_OTHER);

        return ret;
}
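
/*
 * Tracepoint probes for sys_enter/sys_exit: validate the syscall
 * number, look up its metadata, reserve a ring buffer event of the
 * right size and fill in the record.
 */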
static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
{
        struct trace_array *tr = data;
        struct trace_event_file *trace_file;
        struct syscall_trace_enter *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        unsigned long irq_flags;
        int pc;
        int syscall_nr;
        int size;

        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;

        /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
        trace_file = rcu_dereference_sched(tr->enter_syscall_files[syscall_nr]);
        if (!trace_file)
                return;

        if (trace_trigger_soft_disabled(trace_file))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;

        local_save_flags(irq_flags);
        pc = preempt_count();

        buffer = tr->trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer,
                        sys_data->enter_event->event.type, size, irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args, entry->args);

        event_trigger_unlock_commit(trace_file, buffer, event, entry,
                                    irq_flags, pc);
}

static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
{
        struct trace_array *tr = data;
        struct trace_event_file *trace_file;
        struct syscall_trace_exit *entry;
        struct syscall_metadata *sys_data;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer;
        unsigned long irq_flags;
        int pc;
        int syscall_nr;

        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;

        /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
        trace_file = rcu_dereference_sched(tr->exit_syscall_files[syscall_nr]);
        if (!trace_file)
                return;

        if (trace_trigger_soft_disabled(trace_file))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        local_save_flags(irq_flags);
        pc = preempt_count();

        buffer = tr->trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer,
                        sys_data->exit_event->event.type, sizeof(*entry),
                        irq_flags, pc);
        if (!event)
                return;

        entry = ring_buffer_event_data(event);
        entry->nr = syscall_nr;
        entry->ret = syscall_get_return_value(current, regs);

        event_trigger_unlock_commit(trace_file, buffer, event, entry,
                                    irq_flags, pc);
}
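
/*
 * The reg/unreg helpers below attach the probes above to the sys_enter
 * and sys_exit tracepoints. One probe registration per trace array is
 * shared by all syscalls; the per-syscall file pointers select which
 * syscalls are actually recorded.
 */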
static int reg_event_syscall_enter(struct trace_event_file *file,
                                   struct trace_event_call *call)
{
        struct trace_array *tr = file->tr;
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!tr->sys_refcount_enter)
                ret = register_trace_sys_enter(ftrace_syscall_enter, tr);
        if (!ret) {
                rcu_assign_pointer(tr->enter_syscall_files[num], file);
                tr->sys_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

static void unreg_event_syscall_enter(struct trace_event_file *file,
                                      struct trace_event_call *call)
{
        struct trace_array *tr = file->tr;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return;
        mutex_lock(&syscall_trace_lock);
        tr->sys_refcount_enter--;
        RCU_INIT_POINTER(tr->enter_syscall_files[num], NULL);
        if (!tr->sys_refcount_enter)
                unregister_trace_sys_enter(ftrace_syscall_enter, tr);
        mutex_unlock(&syscall_trace_lock);
}

static int reg_event_syscall_exit(struct trace_event_file *file,
                                  struct trace_event_call *call)
{
        struct trace_array *tr = file->tr;
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return -ENOSYS;
        mutex_lock(&syscall_trace_lock);
        if (!tr->sys_refcount_exit)
                ret = register_trace_sys_exit(ftrace_syscall_exit, tr);
        if (!ret) {
                rcu_assign_pointer(tr->exit_syscall_files[num], file);
                tr->sys_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}

static void unreg_event_syscall_exit(struct trace_event_file *file,
                                     struct trace_event_call *call)
{
        struct trace_array *tr = file->tr;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (WARN_ON_ONCE(num < 0 || num >= NR_syscalls))
                return;
        mutex_lock(&syscall_trace_lock);
        tr->sys_refcount_exit--;
        RCU_INIT_POINTER(tr->exit_syscall_files[num], NULL);
        if (!tr->sys_refcount_exit)
                unregister_trace_sys_exit(ftrace_syscall_exit, tr);
        mutex_unlock(&syscall_trace_lock);
}

static int __init init_syscall_trace(struct trace_event_call *call)
{
        int id;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;
        if (num < 0 || num >= NR_syscalls) {
                pr_debug("syscall %s metadata not mapped, disabling ftrace event\n",
                                ((struct syscall_metadata *)call->data)->name);
                return -ENOSYS;
        }

        if (set_syscall_print_fmt(call) < 0)
                return -ENOMEM;

        id = trace_event_raw_init(call);

        if (id < 0) {
                free_syscall_print_fmt(call);
                return id;
        }

        return id;
}
struct trace_event_functions enter_syscall_print_funcs = {
        .trace          = print_syscall_enter,
};

struct trace_event_functions exit_syscall_print_funcs = {
        .trace          = print_syscall_exit,
};

struct trace_event_class __refdata event_class_syscall_enter = {
        .system         = "syscalls",
        .reg            = syscall_enter_register,
        .define_fields  = syscall_enter_define_fields,
        .get_fields     = syscall_get_enter_fields,
        .raw_init       = init_syscall_trace,
};

struct trace_event_class __refdata event_class_syscall_exit = {
        .system         = "syscalls",
        .reg            = syscall_exit_register,
        .define_fields  = syscall_exit_define_fields,
        .fields         = LIST_HEAD_INIT(event_class_syscall_exit.fields),
        .raw_init       = init_syscall_trace,
};
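
/*
 * Default lookup of a syscall's entry address; architectures with a
 * non-standard syscall table can override this weak version.
 */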
unsigned long __init __weak arch_syscall_addr(int nr)
{
        return (unsigned long)sys_call_table[nr];
}
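
/*
 * Build the syscall number -> metadata table used above by resolving
 * every slot of the syscall table at boot.
 */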
void __init init_ftrace_syscalls(void)
{
        struct syscall_metadata *meta;
        unsigned long addr;
        int i;

        syscalls_metadata = kcalloc(NR_syscalls, sizeof(*syscalls_metadata),
                                    GFP_KERNEL);
        if (!syscalls_metadata) {
                WARN_ON(1);
                return;
        }

        for (i = 0; i < NR_syscalls; i++) {
                addr = arch_syscall_addr(i);
                meta = find_syscall_meta(addr);
                if (!meta)
                        continue;

                meta->syscall_nr = i;
                syscalls_metadata[i] = meta;
        }
}

#ifdef CONFIG_PERF_EVENTS

static DECLARE_BITMAP(enabled_perf_enter_syscalls, NR_syscalls);
static DECLARE_BITMAP(enabled_perf_exit_syscalls, NR_syscalls);
static int sys_perf_refcount_enter;
static int sys_perf_refcount_exit;
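
/*
 * perf counterpart of ftrace_syscall_enter(): writes the record into
 * the perf trace buffer, padding the size so the buffer stays
 * u64-aligned after the u32 size field.
 */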
static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_enter *rec;
        struct hlist_head *head;
        int syscall_nr;
        int rctx;
        int size;

        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        head = this_cpu_ptr(sys_data->enter_event->perf_events);
        if (hlist_empty(head))
                return;

        /* get the size after alignment with the u32 buffer size field */
        size = sizeof(unsigned long) * sys_data->nb_args + sizeof(*rec);
        size = ALIGN(size + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        rec = (struct syscall_trace_enter *)perf_trace_buf_prepare(size,
                                sys_data->enter_event->event.type, NULL, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        syscall_get_arguments(current, regs, 0, sys_data->nb_args,
                              (unsigned long *)&rec->args);
        perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
static int perf_sysenter_enable(struct trace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_enter)
                ret = register_trace_sys_enter(perf_syscall_enter, NULL);
        if (ret) {
                pr_info("event trace: Could not activate syscall entry trace point");
        } else {
                set_bit(num, enabled_perf_enter_syscalls);
                sys_perf_refcount_enter++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}
static void perf_sysenter_disable(struct trace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_enter--;
        clear_bit(num, enabled_perf_enter_syscalls);
        if (!sys_perf_refcount_enter)
                unregister_trace_sys_enter(perf_syscall_enter, NULL);
        mutex_unlock(&syscall_trace_lock);
}

static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
{
        struct syscall_metadata *sys_data;
        struct syscall_trace_exit *rec;
        struct hlist_head *head;
        int syscall_nr;
        int rctx;
        int size;

        syscall_nr = trace_get_syscall_nr(current, regs);
        if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
                return;
        if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
                return;

        sys_data = syscall_nr_to_meta(syscall_nr);
        if (!sys_data)
                return;

        head = this_cpu_ptr(sys_data->exit_event->perf_events);
        if (hlist_empty(head))
                return;

        /* We can probably do that at build time */
        size = ALIGN(sizeof(*rec) + sizeof(u32), sizeof(u64));
        size -= sizeof(u32);

        rec = (struct syscall_trace_exit *)perf_trace_buf_prepare(size,
                                sys_data->exit_event->event.type, NULL, &rctx);
        if (!rec)
                return;

        rec->nr = syscall_nr;
        rec->ret = syscall_get_return_value(current, regs);
        perf_trace_buf_submit(rec, size, rctx, 0, 1, regs, head, NULL);
}
static int perf_sysexit_enable(struct trace_event_call *call)
{
        int ret = 0;
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        if (!sys_perf_refcount_exit)
                ret = register_trace_sys_exit(perf_syscall_exit, NULL);
        if (ret) {
                pr_info("event trace: Could not activate syscall exit trace point");
        } else {
                set_bit(num, enabled_perf_exit_syscalls);
                sys_perf_refcount_exit++;
        }
        mutex_unlock(&syscall_trace_lock);
        return ret;
}
static void perf_sysexit_disable(struct trace_event_call *call)
{
        int num;

        num = ((struct syscall_metadata *)call->data)->syscall_nr;

        mutex_lock(&syscall_trace_lock);
        sys_perf_refcount_exit--;
        clear_bit(num, enabled_perf_exit_syscalls);
        if (!sys_perf_refcount_exit)
                unregister_trace_sys_exit(perf_syscall_exit, NULL);
        mutex_unlock(&syscall_trace_lock);
}

#endif /* CONFIG_PERF_EVENTS */
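
/*
 * Registration dispatchers wired into the event classes above: they
 * route TRACE_REG_* requests to the ftrace or perf enable/disable
 * helpers.
 */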
static int syscall_enter_register(struct trace_event_call *event,
                                  enum trace_reg type, void *data)
{
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return reg_event_syscall_enter(file, event);
        case TRACE_REG_UNREGISTER:
                unreg_event_syscall_enter(file, event);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return perf_sysenter_enable(event);
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysenter_disable(event);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}

static int syscall_exit_register(struct trace_event_call *event,
                                 enum trace_reg type, void *data)
{
        struct trace_event_file *file = data;

        switch (type) {
        case TRACE_REG_REGISTER:
                return reg_event_syscall_exit(file, event);
        case TRACE_REG_UNREGISTER:
                unreg_event_syscall_exit(file, event);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return perf_sysexit_enable(event);
        case TRACE_REG_PERF_UNREGISTER:
                perf_sysexit_disable(event);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}