test_get_stack_rawtp.c

// SPDX-License-Identifier: GPL-2.0
#include <linux/bpf.h>
#include "bpf_helpers.h"

/* Permit pretty deep stack traces */
#define MAX_STACK_RAWTP 100
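
/* One sample as delivered to user space: the sizes returned by
 * bpf_get_stack() plus fixed-size buffers for each stack flavor.
 */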
struct stack_trace_t {
        int pid;
        int kern_stack_size;
        int user_stack_size;
        int user_stack_buildid_size;
        __u64 kern_stack[MAX_STACK_RAWTP];
        __u64 user_stack[MAX_STACK_RAWTP];
        struct bpf_stack_build_id user_stack_buildid[MAX_STACK_RAWTP];
};
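
/* Perf event array through which samples are pushed to user space */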
struct bpf_map_def SEC("maps") perfmap = {
        .type = BPF_MAP_TYPE_PERF_EVENT_ARRAY,
        .key_size = sizeof(int),
        .value_size = sizeof(__u32),
        .max_entries = 2,
};
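
/* Single-entry per-cpu scratch map holding one struct stack_trace_t */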
struct bpf_map_def SEC("maps") stackdata_map = {
        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = sizeof(struct stack_trace_t),
        .max_entries = 1,
};

/* Allocate per-cpu space twice the needed. For the code below
 *   usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
 *   if (usize < 0)
 *           return 0;
 *   ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
 *
 * If we have value_size = MAX_STACK_RAWTP * sizeof(__u64), the
 * verifier will complain that the access "raw_data + usize"
 * with size "max_len - usize" may be out of bound.
 * The maximum "raw_data + usize" is "raw_data + max_len"
 * and the maximum "max_len - usize" is "max_len", so the verifier
 * concludes that the maximum buffer access range is
 * "raw_data[0...max_len * 2 - 1]" and hence rejects the program.
 *
 * Doubling the to-be-used max buffer size can fix this verifier
 * issue and avoid complicated C programming massaging.
 * This is an acceptable workaround since there is only one entry here.
 */
struct bpf_map_def SEC("maps") rawdata_map = {
        .type = BPF_MAP_TYPE_PERCPU_ARRAY,
        .key_size = sizeof(__u32),
        .value_size = MAX_STACK_RAWTP * sizeof(__u64) * 2,
        .max_entries = 1,
};

SEC("raw_tracepoint/sys_enter")
int bpf_prog1(void *ctx)
{
        int max_len, max_buildid_len, usize, ksize, total_size;
        struct stack_trace_t *data;
        void *raw_data;
        __u32 key = 0;
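
        /* Grab this CPU's scratch slot for the structured sample */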
        data = bpf_map_lookup_elem(&stackdata_map, &key);
        if (!data)
                return 0;

        max_len = MAX_STACK_RAWTP * sizeof(__u64);
        max_buildid_len = MAX_STACK_RAWTP * sizeof(struct bpf_stack_build_id);
        data->pid = bpf_get_current_pid_tgid();
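
        /* Capture all three flavors: flag 0 selects the kernel stack,
         * BPF_F_USER_STACK the user stack, and adding BPF_F_USER_BUILD_ID
         * stores build-id + offset pairs instead of raw user addresses.
         * Each call returns the number of bytes written or a negative error.
         */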
        data->kern_stack_size = bpf_get_stack(ctx, data->kern_stack,
                                              max_len, 0);
        data->user_stack_size = bpf_get_stack(ctx, data->user_stack, max_len,
                                              BPF_F_USER_STACK);
        data->user_stack_buildid_size = bpf_get_stack(
                ctx, data->user_stack_buildid, max_buildid_len,
                BPF_F_USER_STACK | BPF_F_USER_BUILD_ID);
        bpf_perf_event_output(ctx, &perfmap, 0, data, sizeof(*data));

        /* write both kernel and user stacks to the same buffer */
        raw_data = bpf_map_lookup_elem(&rawdata_map, &key);
        if (!raw_data)
                return 0;
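
        /* User stack goes first; usize is the number of bytes it used */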
        usize = bpf_get_stack(ctx, raw_data, max_len, BPF_F_USER_STACK);
        if (usize < 0)
                return 0;
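
        /* The kernel stack lands right after the user stack. This is the
         * variable-offset access that the comment above rawdata_map is
         * about: the verifier bounds it by max_len * 2.
         */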
        ksize = bpf_get_stack(ctx, raw_data + usize, max_len - usize, 0);
        if (ksize < 0)
                return 0;

        total_size = usize + ksize;
        if (total_size > 0 && total_size <= max_len)
                bpf_perf_event_output(ctx, &perfmap, 0, raw_data, total_size);

        return 0;
}

char _license[] SEC("license") = "GPL";
__u32 _version SEC("version") = 1; /* ignored by tracepoints, required by libbpf.a */