get_cgroup_id_user.c

// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2018 Facebook
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <syscall.h>
#include <unistd.h>
#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/bpf.h>
#include <bpf/bpf.h>
#include <bpf/libbpf.h>
#include "cgroup_helpers.h"
#include "bpf_rlimit.h"
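
/* Print a PASS/FAIL line for one test step; the expression evaluates to
 * the (normalized) condition so callers can jump to cleanup on failure.
 */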
#define CHECK(condition, tag, format...) ({		\
	int __ret = !!(condition);			\
	if (__ret) {					\
		printf("%s:FAIL:%s ", __func__, tag);	\
		printf(format);				\
	} else {					\
		printf("%s:PASS:%s\n", __func__, tag);	\
	}						\
	__ret;						\
})
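
/* Look up a map by name in the loaded BPF object and return its fd,
 * or -1 if the map is not found.
 */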
static int bpf_find_map(const char *test, struct bpf_object *obj,
			const char *name)
{
	struct bpf_map *map;

	map = bpf_object__find_map_by_name(obj, name);
	if (!map)
		return -1;
	return bpf_map__fd(map);
}

#define TEST_CGROUP "/test-bpf-get-cgroup-id/"
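
/* The test joins TEST_CGROUP, loads get_cgroup_id_kern.o, attaches it to
 * the syscalls/sys_enter_nanosleep tracepoint via perf_event_open(), and
 * then checks that the cgroup id recorded by the BPF program (cg_ids map)
 * matches the id the userspace helper get_cgroup_id() reports.
 */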
int main(int argc, char **argv)
{
	const char *probe_name = "syscalls/sys_enter_nanosleep";
	const char *file = "get_cgroup_id_kern.o";
	int err, bytes, efd, prog_fd, pmu_fd;
	int cgroup_fd, cgidmap_fd, pidmap_fd;
	struct perf_event_attr attr = {};
	struct bpf_object *obj;
	__u64 kcgid = 0, ucgid;
	__u32 key = 0, pid;
	int exit_code = 1;
	char buf[256];

	err = setup_cgroup_environment();
	if (CHECK(err, "setup_cgroup_environment", "err %d errno %d\n", err,
		  errno))
		return 1;

	cgroup_fd = create_and_get_cgroup(TEST_CGROUP);
	if (CHECK(cgroup_fd < 0, "create_and_get_cgroup", "err %d errno %d\n",
		  cgroup_fd, errno))
		goto cleanup_cgroup_env;

	err = join_cgroup(TEST_CGROUP);
	if (CHECK(err, "join_cgroup", "err %d errno %d\n", err, errno))
		goto cleanup_cgroup_env;

	err = bpf_prog_load(file, BPF_PROG_TYPE_TRACEPOINT, &obj, &prog_fd);
	if (CHECK(err, "bpf_prog_load", "err %d errno %d\n", err, errno))
		goto cleanup_cgroup_env;

	cgidmap_fd = bpf_find_map(__func__, obj, "cg_ids");
	if (CHECK(cgidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  cgidmap_fd, errno))
		goto close_prog;

	pidmap_fd = bpf_find_map(__func__, obj, "pidmap");
	if (CHECK(pidmap_fd < 0, "bpf_find_map", "err %d errno %d\n",
		  pidmap_fd, errno))
		goto close_prog;
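
	/* Publish our pid to the BPF program via the pidmap map so it can
	 * limit itself to events from this task.
	 */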
	pid = getpid();
	bpf_map_update_elem(pidmap_fd, &key, &pid, 0);
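
	/* Read the numeric tracepoint id from debugfs; it is used as
	 * attr.config for perf_event_open() below.
	 */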
	snprintf(buf, sizeof(buf),
		 "/sys/kernel/debug/tracing/events/%s/id", probe_name);
	efd = open(buf, O_RDONLY, 0);
	if (CHECK(efd < 0, "open", "err %d errno %d\n", efd, errno))
		goto close_prog;
	bytes = read(efd, buf, sizeof(buf));
	close(efd);
	if (CHECK(bytes <= 0 || bytes >= sizeof(buf), "read",
		  "bytes %d errno %d\n", bytes, errno))
		goto close_prog;
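
	/* Configure a raw-sample tracepoint perf event for that id. */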
	attr.config = strtol(buf, NULL, 0);
	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	/* attach to this pid so that all bpf invocations will be in the
	 * cgroup associated with this pid.
	 */
	pmu_fd = syscall(__NR_perf_event_open, &attr, getpid(), -1, -1, 0);
	if (CHECK(pmu_fd < 0, "perf_event_open", "err %d errno %d\n", pmu_fd,
		  errno))
		goto close_prog;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_ENABLE, 0);
	if (CHECK(err, "perf_event_ioc_enable", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	err = ioctl(pmu_fd, PERF_EVENT_IOC_SET_BPF, prog_fd);
	if (CHECK(err, "perf_event_ioc_set_bpf", "err %d errno %d\n", err,
		  errno))
		goto close_pmu;

	/* trigger some syscalls */
	sleep(1);
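
	/* The attached program should have stored this task's cgroup id in
	 * cg_ids; compare it with the id the userspace helper returns for
	 * TEST_CGROUP.
	 */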
	err = bpf_map_lookup_elem(cgidmap_fd, &key, &kcgid);
	if (CHECK(err, "bpf_map_lookup_elem", "err %d errno %d\n", err, errno))
		goto close_pmu;

	ucgid = get_cgroup_id(TEST_CGROUP);
	if (CHECK(kcgid != ucgid, "compare_cgroup_id",
		  "kern cgid %llx user cgid %llx", kcgid, ucgid))
		goto close_pmu;

	exit_code = 0;
	printf("%s:PASS\n", argv[0]);

close_pmu:
	close(pmu_fd);
close_prog:
	bpf_object__close(obj);
cleanup_cgroup_env:
	cleanup_cgroup_environment();
	return exit_code;
}