test_perfmon_thread.c 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384
  1. /*
  2. * Copyright (c) 2014-2018 Remy Noel.
  3. * Copyright (c) 2014-2018 Richard Braun.
  4. *
  5. * This program is free software: you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation, either version 3 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  13. * GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  17. *
  18. *
  19. * This test checks the behavior of performance monitoring on a thread.
  20. * It creates a group with a single event, cycle, and attaches that group to
  21. * a runner thread. Two checks are then performed :
  22. * - the first makes sure the number of cycles changes when the runner
  23. * thread is running
  24. * - the second makes sure the number of cycles doesn't change when the
  25. * runner thread is sleeping
  26. *
  27. * Another group with a cycle event is created and attached to CPU0 to make
  28. * sure that a shared event is correctly handled, and the runner thread is
  29. * bound to CPU0 to force sharing. A third thread is created to fill CPU0
  30. * time with cycles so that the cycle counter of the CPU-attached group
  31. * changes while the runner thread is sleeping.
  32. */
  33. #include <assert.h>
  34. #include <stdbool.h>
  35. #include <stddef.h>
  36. #include <string.h>
  37. #include <kern/atomic.h>
  38. #include <kern/clock.h>
  39. #include <kern/condition.h>
  40. #include <kern/cpumap.h>
  41. #include <kern/error.h>
  42. #include <kern/kmem.h>
  43. #include <kern/list.h>
  44. #include <kern/log.h>
  45. #include <kern/mutex.h>
  46. #include <kern/panic.h>
  47. #include <kern/perfmon.h>
  48. #include <kern/thread.h>
  49. #include <test/test.h>
/* Duration, in milliseconds, the controller sleeps between counter checks */
#define TEST_WAIT_DELAY_MS 1000

/* Size of a test event name buffer, including the terminating NUL */
#define TEST_EVENT_NAME_MAX_SIZE 32

/*
 * A performance monitoring event under test.
 *
 * Wraps a perfmon event with the value recorded at the previous
 * save/check and a label used in log messages.
 */
struct test_event {
    struct list node;                       /* Linkage in a test_group's event list */
    struct perfmon_event pm_event;          /* The wrapped perfmon event */
    uint64_t last_value;                    /* Counter value at last save/check */
    char name[TEST_EVENT_NAME_MAX_SIZE];    /* Label for log messages */
};

/* A set of events that are attached, saved and checked together */
struct test_group {
    struct list events;
};

/* Life cycle of the runner thread, driven by the controller */
enum test_state {
    TEST_STATE_RUNNING,
    TEST_STATE_SUSPENDED,
    TEST_STATE_TERMINATED,
};

static struct condition test_condition;     /* Wakes the suspended runner */
static struct mutex test_mutex;             /* Serializes test_state updates */
static enum test_state test_state;
  69. static void
  70. test_wait(void)
  71. {
  72. log_info("test: controller waiting");
  73. thread_delay(clock_ticks_from_ms(TEST_WAIT_DELAY_MS), false);
  74. log_info("test: controller resuming");
  75. }
  76. static void
  77. test_event_init(struct test_event *event, unsigned int id, const char *name)
  78. {
  79. int error;
  80. error = perfmon_event_init(&event->pm_event, id, PERFMON_EF_KERN);
  81. error_check(error, "perfmon_event_init");
  82. strlcpy(event->name, name, sizeof(event->name));
  83. }
  84. static void
  85. test_event_attach(struct test_event *event, struct thread *thread)
  86. {
  87. int error;
  88. error = perfmon_event_attach(&event->pm_event, thread);
  89. error_check(error, "perfmon_event_attach");
  90. }
  91. static void
  92. test_event_attach_cpu(struct test_event *event, unsigned int cpu)
  93. {
  94. int error;
  95. error = perfmon_event_attach_cpu(&event->pm_event, cpu);
  96. error_check(error, "perfmon_event_attach_cpu");
  97. }
  98. static void
  99. test_event_detach(struct test_event *event)
  100. {
  101. int error;
  102. error = perfmon_event_detach(&event->pm_event);
  103. error_check(error, "perfmon_event_detach");
  104. }
  105. static uint64_t
  106. test_event_read(struct test_event *event)
  107. {
  108. uint64_t value;
  109. value = perfmon_event_read(&event->pm_event);
  110. log_info("test: %s: %llu", event->name, (unsigned long long)value);
  111. return value;
  112. }
  113. static void
  114. test_event_save(struct test_event *event)
  115. {
  116. event->last_value = test_event_read(event);
  117. }
  118. static void
  119. test_event_check(struct test_event *event, bool change_expected)
  120. {
  121. uint64_t value;
  122. bool changed;
  123. value = test_event_read(event);
  124. changed = (value != event->last_value);
  125. if (changed != change_expected) {
  126. panic("test: invalid value");
  127. }
  128. event->last_value = value;
  129. }
  130. static void
  131. test_group_init(struct test_group *group)
  132. {
  133. list_init(&group->events);
  134. }
  135. static void
  136. test_group_add(struct test_group *group, struct test_event *event)
  137. {
  138. list_insert_tail(&group->events, &event->node);
  139. }
  140. static void
  141. test_group_attach(struct test_group *group, struct thread *thread)
  142. {
  143. struct test_event *event;
  144. list_for_each_entry(&group->events, event, node) {
  145. test_event_attach(event, thread);
  146. }
  147. }
  148. static void
  149. test_group_attach_cpu(struct test_group *group, unsigned int cpu)
  150. {
  151. struct test_event *event;
  152. list_for_each_entry(&group->events, event, node) {
  153. test_event_attach_cpu(event, cpu);
  154. }
  155. }
  156. static void
  157. test_group_detach(struct test_group *group)
  158. {
  159. struct test_event *event;
  160. list_for_each_entry(&group->events, event, node) {
  161. test_event_detach(event);
  162. }
  163. }
  164. static void
  165. test_group_save(struct test_group *group)
  166. {
  167. struct test_event *event;
  168. list_for_each_entry(&group->events, event, node) {
  169. test_event_save(event);
  170. }
  171. }
  172. static void
  173. test_group_check(struct test_group *group, bool change_expected)
  174. {
  175. struct test_event *event;
  176. list_for_each_entry(&group->events, event, node) {
  177. test_event_check(event, change_expected);
  178. }
  179. }
/*
 * Runner thread.
 *
 * While the state is TEST_STATE_RUNNING, spin with the test mutex
 * briefly released on each iteration, burning cycles so the
 * thread-attached counter progresses; the unlock/relock window is what
 * lets the controller (test_suspend/test_terminate) acquire the mutex.
 * While TEST_STATE_SUSPENDED, sleep on the condition variable, so the
 * thread-attached counter must stop changing. Return once the state
 * becomes TEST_STATE_TERMINATED.
 */
static void
test_run(void *arg)
{
    bool report;

    (void)arg;

    report = true;

    mutex_lock(&test_mutex);

    while (test_state != TEST_STATE_TERMINATED) {
        if (test_state == TEST_STATE_SUSPENDED) {
            log_info("test: runner suspended");
            report = true;
            condition_wait(&test_condition, &test_mutex);
        } else {
            /* Release the mutex so state changes can be applied */
            mutex_unlock(&test_mutex);

            /* Log only on the first iteration after a resume */
            if (report) {
                log_info("test: runner running");
                report = false;
            }

            mutex_lock(&test_mutex);
        }
    }

    mutex_unlock(&test_mutex);
}
  203. static void
  204. test_fill(void *arg)
  205. {
  206. enum test_state state;
  207. (void)arg;
  208. do {
  209. state = atomic_load(&test_state, ATOMIC_RELAXED);
  210. } while (state != TEST_STATE_TERMINATED);
  211. }
  212. static void
  213. test_wait_state(const struct thread *thread, unsigned short state)
  214. {
  215. for (;;) {
  216. if (thread_state(thread) == state) {
  217. break;
  218. }
  219. thread_delay(1, false);
  220. }
  221. }
/*
 * Wake the runner thread up and wait until it's actually running.
 *
 * The runner must currently be suspended (sleeping in condition_wait).
 */
static void
test_resume(struct thread *thread)
{
    /* Make sure the runner has reached condition_wait before signalling */
    test_wait_state(thread, THREAD_SLEEPING);

    mutex_lock(&test_mutex);
    assert(test_state == TEST_STATE_SUSPENDED);
    /* Atomic store pairs with the lockless atomic_load in test_fill */
    atomic_store(&test_state, TEST_STATE_RUNNING, ATOMIC_RELAXED);
    condition_signal(&test_condition);
    mutex_unlock(&test_mutex);

    /* Don't return until the wake-up has actually taken effect */
    test_wait_state(thread, THREAD_RUNNING);
}
/*
 * Suspend the runner thread and wait until it's actually sleeping.
 *
 * The runner must currently be running; it observes the new state the
 * next time it reacquires the mutex in its spin loop.
 */
static void
test_suspend(struct thread *thread)
{
    test_wait_state(thread, THREAD_RUNNING);

    mutex_lock(&test_mutex);
    assert(test_state == TEST_STATE_RUNNING);
    /* Atomic store pairs with the lockless atomic_load in test_fill */
    atomic_store(&test_state, TEST_STATE_SUSPENDED, ATOMIC_RELAXED);
    mutex_unlock(&test_mutex);

    /* Don't return until the runner has gone to sleep */
    test_wait_state(thread, THREAD_SLEEPING);
}
  243. static void
  244. test_terminate(void)
  245. {
  246. mutex_lock(&test_mutex);
  247. test_state = TEST_STATE_TERMINATED;
  248. condition_signal(&test_condition);
  249. mutex_unlock(&test_mutex);
  250. }
/*
 * Controller thread.
 *
 * Builds one single-event (cycle) group attached to the runner thread
 * and one attached to CPU0, then verifies that:
 * - both counters change while the runner runs,
 * - only the CPU-attached counter changes while the runner sleeps
 *   (the filler thread keeps CPU0 busy).
 * Finally terminates the test, detaches the groups and joins the runner.
 *
 * arg is the runner thread, passed by test_setup.
 */
static void
test_control(void *arg)
{
    struct test_event thread_cycle, cpu_cycle;
    struct test_group thread_group, cpu_group;
    struct thread *runner;

    runner = arg;

    /* One cycle-counting group attached to the runner thread */
    test_event_init(&thread_cycle, PERFMON_EV_CYCLE, "thread_cycle");
    test_group_init(&thread_group);
    test_group_add(&thread_group, &thread_cycle);

    /* One cycle-counting group attached to CPU0, forcing event sharing */
    test_event_init(&cpu_cycle, PERFMON_EV_CYCLE, "cpu_cycle");
    test_group_init(&cpu_group);
    test_group_add(&cpu_group, &cpu_cycle);

    test_group_attach(&thread_group, runner);
    test_group_attach_cpu(&cpu_group, 0);

    /* Baseline values before the runner starts running */
    test_group_save(&thread_group);
    test_group_save(&cpu_group);

    /* First check: both counters must change while the runner runs */
    test_resume(runner);
    test_wait();
    test_suspend(runner);
    test_group_check(&thread_group, true);
    test_group_check(&cpu_group, true);

    /*
     * Second check: with the runner asleep, the thread-attached counter
     * must stay still while the filler keeps the CPU counter moving.
     */
    test_wait();
    test_group_check(&thread_group, false);
    test_group_check(&cpu_group, true);

    test_terminate();

    test_group_detach(&cpu_group);
    test_group_detach(&thread_group);

    thread_join(runner);

    log_info("test: done");
}
/*
 * Test entry point.
 *
 * Initializes the shared synchronization objects, then creates:
 * - the runner thread, bound to CPU0 (joinable: test_control joins it),
 * - the filler thread, bound to CPU0 at minimum priority, detached,
 * - the controller thread, detached, given the runner as argument.
 */
void
test_setup(void)
{
    struct thread_attr attr;
    struct thread *runner;
    struct cpumap *cpumap;
    int error;

    condition_init(&test_condition);
    mutex_init(&test_mutex);
    /* The runner starts suspended until test_control resumes it */
    test_state = TEST_STATE_SUSPENDED;

    /* Bind both the runner and the filler to CPU0 to force sharing */
    error = cpumap_create(&cpumap);
    error_check(error, "cpumap_create");
    cpumap_zero(cpumap);
    cpumap_set(cpumap, 0);

    thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_run");
    thread_attr_set_cpumap(&attr, cpumap);
    error = thread_create(&runner, &attr, test_run, NULL);
    error_check(error, "thread_create");

    /* Minimum priority so the filler never starves the runner */
    thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_fill");
    thread_attr_set_detached(&attr);
    thread_attr_set_cpumap(&attr, cpumap);
    thread_attr_set_priority(&attr, THREAD_SCHED_FS_PRIO_MIN);
    error = thread_create(NULL, &attr, test_fill, NULL);
    error_check(error, "thread_create");

    thread_attr_init(&attr, THREAD_KERNEL_PREFIX "test_control");
    thread_attr_set_detached(&attr);
    error = thread_create(NULL, &attr, test_control, runner);
    error_check(error, "thread_create");

    cpumap_destroy(cpumap);
}