openat-syscall-all-cpus.c

// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
/* For the CPU_* macros */
#include <pthread.h>

#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <api/fs/fs.h>
#include <linux/err.h>
#include <api/fs/tracing_path.h>
#include "evsel.h"
#include "tests.h"
#include "thread_map.h"
#include "cpumap.h"
#include "debug.h"
#include "stat.h"
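
/*
 * Sanity test for the "syscalls:sys_enter_openat" tracepoint event:
 * pin the current thread to each online CPU in turn, perform a
 * distinct, known number of openat() calls there, then read the
 * counter back for every CPU and check that each one saw exactly
 * the calls made while running on it.
 */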
int test__openat_syscall_event_on_all_cpus(struct test *test __maybe_unused, int subtest __maybe_unused)
{
        int err = -1, fd, cpu;
        struct cpu_map *cpus;
        struct perf_evsel *evsel;
        unsigned int nr_openat_calls = 111, i;
        cpu_set_t cpu_set;
        struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
        char sbuf[STRERR_BUFSIZE];
        char errbuf[BUFSIZ];

        if (threads == NULL) {
                pr_debug("thread_map__new\n");
                return -1;
        }

        cpus = cpu_map__new(NULL);
        if (cpus == NULL) {
                pr_debug("cpu_map__new\n");
                goto out_thread_map_delete;
        }

        CPU_ZERO(&cpu_set);
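
        /*
         * Create an evsel for the "syscalls:sys_enter_openat"
         * tracepoint; on failure, tracing_path__strerror_open_tp()
         * turns errno into a human-readable hint (e.g. tracefs not
         * mounted or not accessible).
         */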
        evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
        if (IS_ERR(evsel)) {
                tracing_path__strerror_open_tp(errno, errbuf, sizeof(errbuf), "syscalls", "sys_enter_openat");
                pr_debug("%s\n", errbuf);
                goto out_cpu_map_delete;
        }

        if (perf_evsel__open(evsel, cpus, threads) < 0) {
                pr_debug("failed to open counter: %s, "
                         "tweak /proc/sys/kernel/perf_event_paranoid?\n",
                         str_error_r(errno, sbuf, sizeof(sbuf)));
                goto out_evsel_delete;
        }
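
        /*
         * Run the workload: for each CPU, bind this thread to that CPU
         * alone and issue a distinct number of openat() calls (111 plus
         * the CPU's index), so each per-CPU count can be verified later.
         */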
        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int ncalls = nr_openat_calls + cpu;

                /*
                 * XXX eventually lift this restriction in a way that
                 * keeps perf building on older glibc installations
                 * without CPU_ALLOC. 1024 cpus in 2010 still seems
                 * a reasonable upper limit tho :-)
                 */
                if (cpus->map[cpu] >= CPU_SETSIZE) {
                        pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
                        continue;
                }

                CPU_SET(cpus->map[cpu], &cpu_set);
                if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
                        pr_debug("sched_setaffinity() failed on CPU %d: %s\n",
                                 cpus->map[cpu],
                                 str_error_r(errno, sbuf, sizeof(sbuf)));
                        goto out_close_fd;
                }
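
                /*
                 * The dirfd of 0 is ignored because the path is
                 * absolute; each openat()/close() pair fires the
                 * sys_enter_openat tracepoint once on this CPU.
                 */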
                for (i = 0; i < ncalls; ++i) {
                        fd = openat(0, "/etc/passwd", O_RDONLY);
                        close(fd);
                }
                CPU_CLR(cpus->map[cpu], &cpu_set);
        }

        /*
         * Here we need to explicitly preallocate the counts: with auto
         * allocation only one cpu's worth would be allocated, since
         * reading starts at cpu 0.
         */
        if (perf_evsel__alloc_counts(evsel, cpus->nr, 1) < 0) {
                pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
                goto out_close_fd;
        }

        err = 0;
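
        /*
         * Verify: read the tracepoint count back for every CPU and
         * compare it against the number of openat() calls issued there.
         */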
        for (cpu = 0; cpu < cpus->nr; ++cpu) {
                unsigned int expected;

                if (cpus->map[cpu] >= CPU_SETSIZE)
                        continue;

                if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
                        pr_debug("perf_evsel__read_on_cpu\n");
                        err = -1;
                        break;
                }

                expected = nr_openat_calls + cpu;
                if (perf_counts(evsel->counts, cpu, 0)->val != expected) {
                        pr_debug("perf_evsel__read_on_cpu: expected to intercept %u calls on cpu %d, got %" PRIu64 "\n",
                                 expected, cpus->map[cpu], perf_counts(evsel->counts, cpu, 0)->val);
                        err = -1;
                }
        }
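
        /*
         * Teardown runs in reverse order of setup; each failure above
         * jumps to the label that releases only what had already been
         * allocated at that point.
         */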
        perf_evsel__free_counts(evsel);
out_close_fd:
        perf_evsel__close_fd(evsel);
out_evsel_delete:
        perf_evsel__delete(evsel);
out_cpu_map_delete:
        cpu_map__put(cpus);
out_thread_map_delete:
        thread_map__put(threads);
        return err;
}