sw-clock.c

// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <inttypes.h>
#include <unistd.h>
#include <stdlib.h>
#include <signal.h>
#include <sys/mman.h>

#include "tests.h"
#include "util/evsel.h"
#include "util/evlist.h"
#include "util/cpumap.h"
#include "util/thread_map.h"

#define NR_LOOPS  10000000

/*
 * This test will open software clock events (cpu-clock, task-clock)
 * then check their frequency -> period conversion has no artifact of
 * setting period to 1 forcefully.
 */
static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
{
	int i, err = -1;
	volatile int tmp = 0;
	u64 total_periods = 0;
	int nr_samples = 0;
	char sbuf[STRERR_BUFSIZE];
	union perf_event *event;
	struct perf_evsel *evsel;
	struct perf_evlist *evlist;
	struct perf_event_attr attr = {
		.type = PERF_TYPE_SOFTWARE,
		.config = clock_id,
		.sample_type = PERF_SAMPLE_PERIOD,
		.exclude_kernel = 1,
		.disabled = 1,
		.freq = 1,
	};
	struct cpu_map *cpus;
	struct thread_map *threads;
	struct perf_mmap *md;
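
	/*
	 * Use frequency mode (attr.freq = 1): instead of a fixed period,
	 * the kernel keeps adjusting the event period so that samples
	 * arrive at roughly attr.sample_freq times per second.
	 */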
	attr.sample_freq = 500;

	evlist = perf_evlist__new();
	if (evlist == NULL) {
		pr_debug("perf_evlist__new\n");
		return -1;
	}

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL) {
		pr_debug("perf_evsel__new\n");
		goto out_delete_evlist;
	}
	perf_evlist__add(evlist, evsel);
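
	/*
	 * Count only this process: a dummy cpu map (any CPU) paired with a
	 * thread map containing the current pid.
	 */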
	cpus = cpu_map__dummy_new();
	threads = thread_map__new_by_tid(getpid());
	if (!cpus || !threads) {
		err = -ENOMEM;
		pr_debug("Not enough memory to create thread/cpu maps\n");
		goto out_free_maps;
	}

	perf_evlist__set_maps(evlist, cpus, threads);

	cpus = NULL;
	threads = NULL;
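
	/*
	 * Opening the events may fail, for instance when the requested
	 * sample_freq exceeds kernel.perf_event_max_sample_rate; the hint
	 * printed below names the sysctl knob to check.
	 */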
	if (perf_evlist__open(evlist)) {
		const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";

		err = -errno;
		pr_debug("Couldn't open evlist: %s\nHint: check %s, using %" PRIu64 " in this test.\n",
			 str_error_r(errno, sbuf, sizeof(sbuf)),
			 knob, (u64)attr.sample_freq);
		goto out_delete_evlist;
	}
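
	/*
	 * Map a ring buffer to receive the samples; the second argument is
	 * the buffer size in pages.
	 */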
	err = perf_evlist__mmap(evlist, 128);
	if (err < 0) {
		pr_debug("failed to mmap event: %d (%s)\n", errno,
			 str_error_r(errno, sbuf, sizeof(sbuf)));
		goto out_delete_evlist;
	}

	perf_evlist__enable(evlist);

	/* collect samples */
	for (i = 0; i < NR_LOOPS; i++)
		tmp++;

	perf_evlist__disable(evlist);
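
	/*
	 * Drain the ring buffer: sum the period of every PERF_RECORD_SAMPLE
	 * and count the samples; other record types are just consumed.
	 */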
	md = &evlist->mmap[0];
	if (perf_mmap__read_init(md) < 0)
		goto out_init;

	while ((event = perf_mmap__read_event(md)) != NULL) {
		struct perf_sample sample;

		if (event->header.type != PERF_RECORD_SAMPLE)
			goto next_event;

		err = perf_evlist__parse_sample(evlist, event, &sample);
		if (err < 0) {
			pr_debug("Error during parse sample\n");
			goto out_delete_evlist;
		}

		total_periods += sample.period;
		nr_samples++;
next_event:
		perf_mmap__consume(md);
	}
	perf_mmap__read_done(md);
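
	/*
	 * If every sample carried a period of exactly 1, the summed periods
	 * equal the sample count, i.e. the frequency -> period conversion
	 * never kicked in. Report that as a failure.
	 */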
out_init:
	if ((u64) nr_samples == total_periods) {
		pr_debug("All (%d) samples have period value of 1!\n",
			 nr_samples);
		err = -1;
	}

out_free_maps:
	cpu_map__put(cpus);
	thread_map__put(threads);
out_delete_evlist:
	perf_evlist__delete(evlist);
	return err;
}
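
/*
 * 'perf test' entry point: run the check for both software clocks,
 * cpu-clock and task-clock.
 */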
int test__sw_clock_freq(struct test *test __maybe_unused, int subtest __maybe_unused)
{
	int ret;

	ret = __test__sw_clock_freq(PERF_COUNT_SW_CPU_CLOCK);
	if (!ret)
		ret = __test__sw_clock_freq(PERF_COUNT_SW_TASK_CLOCK);

	return ret;
}