/* tools/perf/arch/x86/tests/intel-cqm.c */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include "tests/tests.h"
  3. #include "perf.h"
  4. #include "cloexec.h"
  5. #include "debug.h"
  6. #include "evlist.h"
  7. #include "evsel.h"
  8. #include "arch-tests.h"
  9. #include <signal.h>
  10. #include <sys/mman.h>
  11. #include <sys/wait.h>
  12. #include <errno.h>
  13. #include <string.h>
  14. static pid_t spawn(void)
  15. {
  16. pid_t pid;
  17. pid = fork();
  18. if (pid)
  19. return pid;
  20. while(1)
  21. sleep(5);
  22. return 0;
  23. }
/*
 * Create an event group that contains both a sampled hardware
 * (cpu-cycles) and software (intel_cqm/llc_occupancy/) event. We then
 * wait for the hardware perf counter to overflow and generate a PMI,
 * which triggers an event read for both of the events in the group.
 *
 * Since reading Intel CQM event counters requires sending SMP IPIs, the
 * CQM pmu needs to handle the above situation gracefully, and return
 * the last read counter value to avoid triggering a WARN_ON_ONCE() in
 * smp_call_function_many() caused by sending IPIs from NMI context.
 */
  35. int test__intel_cqm_count_nmi_context(struct test *test __maybe_unused, int subtest __maybe_unused)
  36. {
  37. struct perf_evlist *evlist = NULL;
  38. struct perf_evsel *evsel = NULL;
  39. struct perf_event_attr pe;
  40. int i, fd[2], flag, ret;
  41. size_t mmap_len;
  42. void *event;
  43. pid_t pid;
  44. int err = TEST_FAIL;
  45. flag = perf_event_open_cloexec_flag();
  46. evlist = perf_evlist__new();
  47. if (!evlist) {
  48. pr_debug("perf_evlist__new failed\n");
  49. return TEST_FAIL;
  50. }
  51. ret = parse_events(evlist, "intel_cqm/llc_occupancy/", NULL);
  52. if (ret) {
  53. pr_debug("parse_events failed, is \"intel_cqm/llc_occupancy/\" available?\n");
  54. err = TEST_SKIP;
  55. goto out;
  56. }
  57. evsel = perf_evlist__first(evlist);
  58. if (!evsel) {
  59. pr_debug("perf_evlist__first failed\n");
  60. goto out;
  61. }
  62. memset(&pe, 0, sizeof(pe));
  63. pe.size = sizeof(pe);
  64. pe.type = PERF_TYPE_HARDWARE;
  65. pe.config = PERF_COUNT_HW_CPU_CYCLES;
  66. pe.read_format = PERF_FORMAT_GROUP;
  67. pe.sample_period = 128;
  68. pe.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_READ;
  69. pid = spawn();
  70. fd[0] = sys_perf_event_open(&pe, pid, -1, -1, flag);
  71. if (fd[0] < 0) {
  72. pr_debug("failed to open event\n");
  73. goto out;
  74. }
  75. memset(&pe, 0, sizeof(pe));
  76. pe.size = sizeof(pe);
  77. pe.type = evsel->attr.type;
  78. pe.config = evsel->attr.config;
  79. fd[1] = sys_perf_event_open(&pe, pid, -1, fd[0], flag);
  80. if (fd[1] < 0) {
  81. pr_debug("failed to open event\n");
  82. goto out;
  83. }
  84. /*
  85. * Pick a power-of-two number of pages + 1 for the meta-data
  86. * page (struct perf_event_mmap_page). See tools/perf/design.txt.
  87. */
  88. mmap_len = page_size * 65;
  89. event = mmap(NULL, mmap_len, PROT_READ, MAP_SHARED, fd[0], 0);
  90. if (event == (void *)(-1)) {
  91. pr_debug("failed to mmap %d\n", errno);
  92. goto out;
  93. }
  94. sleep(1);
  95. err = TEST_OK;
  96. munmap(event, mmap_len);
  97. for (i = 0; i < 2; i++)
  98. close(fd[i]);
  99. kill(pid, SIGKILL);
  100. wait(NULL);
  101. out:
  102. perf_evlist__delete(evlist);
  103. return err;
  104. }