test_perfmon_thread.c

/*
 * Copyright (c) 2014-2018 Remy Noel.
 * Copyright (c) 2014-2018 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * This test checks the behavior of performance monitoring on a thread.
 * It creates a group with a single event, cycle, and attaches that group to
 * a runner thread. Two checks are then performed:
 *  - the first makes sure the number of cycles changes when the runner
 *    thread is running
 *  - the second makes sure the number of cycles doesn't change when the
 *    runner thread is sleeping
 *
 * Another group with a cycle event is created and attached to CPU0 to make
 * sure that a shared event is correctly handled, and the runner thread is
 * bound to CPU0 to force sharing. A third thread is created to fill CPU0
 * time with cycles so that the cycle counter of the CPU-attached group
 * changes while the runner thread is sleeping.
 */

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#include <kern/atomic.h>
#include <kern/clock.h>
#include <kern/condition.h>
#include <kern/cpumap.h>
#include <kern/error.h>
#include <kern/kmem.h>
#include <kern/list.h>
#include <kern/log.h>
#include <kern/mutex.h>
#include <kern/panic.h>
#include <kern/perfmon.h>
#include <kern/thread.h>

#include <test/test.h>

#define TEST_WAIT_DELAY_MS        1000
#define TEST_EVENT_NAME_MAX_SIZE  32

struct test_event
{
  struct list node;
  struct perfmon_event pm_event;
  uint64_t last_value;
  char name[TEST_EVENT_NAME_MAX_SIZE];
};

struct test_group
{
  struct list events;
};

enum test_state
{
  TEST_STATE_RUNNING,
  TEST_STATE_SUSPENDED,
  TEST_STATE_TERMINATED,
};

static struct condition test_condition;
static struct mutex test_mutex;
static enum test_state test_state;
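
/*
 * Block the controller for a fixed delay, giving the monitored threads time
 * to either accumulate cycles or stay idle before the next check.
 */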
static void
test_wait (void)
{
  log_info ("test: controller waiting");
  thread_delay (clock_ticks_from_ms (TEST_WAIT_DELAY_MS), false);
  log_info ("test: controller resuming");
}

static void
test_event_init (struct test_event *event, uint32_t id, const char *name)
{
  int error = perfmon_event_init (&event->pm_event, id, PERFMON_EF_KERN);
  error_check (error, "perfmon_event_init");
  strlcpy (event->name, name, sizeof (event->name));
}

static void
test_event_attach (struct test_event *event, struct thread *thread)
{
  int error = perfmon_event_attach (&event->pm_event, thread);
  error_check (error, "perfmon_event_attach");
}

static void
test_event_attach_cpu (struct test_event *event, uint32_t cpu)
{
  int error = perfmon_event_attach_cpu (&event->pm_event, cpu);
  error_check (error, "perfmon_event_attach_cpu");
}

static void
test_event_detach (struct test_event *event)
{
  int error = perfmon_event_detach (&event->pm_event);
  error_check (error, "perfmon_event_detach");
}

static uint64_t
test_event_read (struct test_event *event)
{
  uint64_t value = perfmon_event_read (&event->pm_event);
  log_info ("test: %s: %llu", event->name, (unsigned long long) value);
  return (value);
}

static void
test_event_save (struct test_event *event)
{
  event->last_value = test_event_read (event);
}
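
/*
 * Read the event again and verify that its value did or didn't change,
 * as requested by the caller.
 */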
static void
test_event_check (struct test_event *event, bool change_expected)
{
  uint64_t value = test_event_read (event);
  bool changed = (value != event->last_value);

  if (changed != change_expected)
    panic ("test: invalid value");

  event->last_value = value;
}

static void
test_group_init (struct test_group *group)
{
  list_init (&group->events);
}

static void
test_group_add (struct test_group *group, struct test_event *event)
{
  list_insert_tail (&group->events, &event->node);
}

static void
test_group_attach (struct test_group *group, struct thread *thread)
{
  struct test_event *event;

  list_for_each_entry (&group->events, event, node)
    test_event_attach (event, thread);
}

static void
test_group_attach_cpu (struct test_group *group, uint32_t cpu)
{
  struct test_event *event;

  list_for_each_entry (&group->events, event, node)
    test_event_attach_cpu (event, cpu);
}

static void
test_group_detach (struct test_group *group)
{
  struct test_event *event;

  list_for_each_entry (&group->events, event, node)
    test_event_detach (event);
}

static void
test_group_save (struct test_group *group)
{
  struct test_event *event;

  list_for_each_entry (&group->events, event, node)
    test_event_save (event);
}

static void
test_group_check (struct test_group *group, bool change_expected)
{
  struct test_event *event;

  list_for_each_entry (&group->events, event, node)
    test_event_check (event, change_expected);
}
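
/*
 * Runner thread: busy-loops while the test state is RUNNING, sleeps on the
 * condition variable while SUSPENDED, and exits once TERMINATED.
 */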
static void
test_run (void *arg __unused)
{
  bool report = true;

  mutex_lock (&test_mutex);

  while (test_state != TEST_STATE_TERMINATED)
    {
      if (test_state == TEST_STATE_SUSPENDED)
        {
          log_info ("test: runner suspended");
          report = true;
          condition_wait (&test_condition, &test_mutex);
        }
      else
        {
          mutex_unlock (&test_mutex);

          if (report)
            {
              log_info ("test: runner running");
              report = false;
            }

          mutex_lock (&test_mutex);
        }
    }

  mutex_unlock (&test_mutex);
}
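
/*
 * Filler thread: spins on CPU0 at the lowest priority so that the
 * CPU-attached cycle counter keeps advancing even while the runner sleeps.
 */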
static void
test_fill (void *arg __unused)
{
  while (1)
    {
      if (atomic_load_rlx (&test_state) == TEST_STATE_TERMINATED)
        break;
    }
}
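
/* Poll until the given thread reaches the expected scheduling state. */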
static void
test_wait_state (const struct thread *thread, uint32_t state)
{
  while (1)
    {
      if (thread_state (thread) == state)
        break;

      thread_delay (1, false);
    }
}
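
/*
 * Wake the suspended runner and wait until it is actually running again.
 */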
static void
test_resume (struct thread *thread)
{
  test_wait_state (thread, THREAD_SLEEPING);

  mutex_lock (&test_mutex);
  assert (test_state == TEST_STATE_SUSPENDED);
  atomic_store_rlx (&test_state, TEST_STATE_RUNNING);
  condition_signal (&test_condition);
  mutex_unlock (&test_mutex);

  test_wait_state (thread, THREAD_RUNNING);
}
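
/*
 * Ask the running runner to suspend and wait until it is actually sleeping.
 */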
static void
test_suspend (struct thread *thread)
{
  test_wait_state (thread, THREAD_RUNNING);

  mutex_lock (&test_mutex);
  assert (test_state == TEST_STATE_RUNNING);
  atomic_store_rlx (&test_state, TEST_STATE_SUSPENDED);
  mutex_unlock (&test_mutex);

  test_wait_state (thread, THREAD_SLEEPING);
}
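
/*
 * Mark the test as terminated and wake the runner so that both the runner
 * and the filler can exit their loops.
 */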
static void
test_terminate (void)
{
  mutex_lock (&test_mutex);
  test_state = TEST_STATE_TERMINATED;
  condition_signal (&test_condition);
  mutex_unlock (&test_mutex);
}
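
/*
 * Controller thread: sets up the thread-attached and CPU-attached groups,
 * drives the runner through resume/suspend cycles, and checks the counters
 * at each step.
 */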
static void
test_control (void *arg)
{
  struct thread *runner = arg;
  struct test_event thread_cycle, cpu_cycle;
  struct test_group thread_group, cpu_group;

  test_event_init (&thread_cycle, PERFMON_EV_CYCLE, "thread_cycle");
  test_group_init (&thread_group);
  test_group_add (&thread_group, &thread_cycle);

  test_event_init (&cpu_cycle, PERFMON_EV_CYCLE, "cpu_cycle");
  test_group_init (&cpu_group);
  test_group_add (&cpu_group, &cpu_cycle);

  test_group_attach (&thread_group, runner);
  test_group_attach_cpu (&cpu_group, 0);

  test_group_save (&thread_group);
  test_group_save (&cpu_group);

  test_resume (runner);
  test_wait ();
  test_suspend (runner);

  /* The runner was active during the wait: both counters must have changed. */
  test_group_check (&thread_group, true);
  test_group_check (&cpu_group, true);

  test_wait ();

  /*
   * The runner slept during the wait: its counter must be unchanged, while
   * the CPU counter keeps advancing because of the filler thread.
   */
  test_group_check (&thread_group, false);
  test_group_check (&cpu_group, true);

  test_terminate ();

  test_group_detach (&cpu_group);
  test_group_detach (&thread_group);

  thread_join (runner);

  log_info ("test (perfmon_thread): OK");
}
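
/*
 * Test entry point: creates the runner and filler threads, both bound to
 * CPU0, and a detached controller thread that performs the checks.
 */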
TEST_INLINE (perfmon_thread)
{
  condition_init (&test_condition);
  mutex_init (&test_mutex);
  test_state = TEST_STATE_SUSPENDED;

  /*
   * Bind the runner and the filler to CPU0 so that the thread-attached and
   * CPU-attached groups share the same hardware counter.
   */
  struct cpumap *cpumap;
  int error = cpumap_create (&cpumap);
  error_check (error, "cpumap_create");
  cpumap_zero (cpumap);
  cpumap_set (cpumap, 0);

  struct thread_attr attr;
  thread_attr_init (&attr, THREAD_KERNEL_PREFIX "test_run");
  thread_attr_set_cpumap (&attr, cpumap);

  struct thread *runner;
  error = thread_create (&runner, &attr, test_run, NULL);
  error_check (error, "thread_create");

  /*
   * The filler runs at the lowest priority so that it only consumes CPU0
   * time the runner leaves idle.
   */
  thread_attr_init (&attr, THREAD_KERNEL_PREFIX "test_fill");
  thread_attr_set_detached (&attr);
  thread_attr_set_cpumap (&attr, cpumap);
  thread_attr_set_priority (&attr, THREAD_SCHED_FS_PRIO_MIN);
  error = thread_create (NULL, &attr, test_fill, NULL);
  error_check (error, "thread_create");

  thread_attr_init (&attr, THREAD_KERNEL_PREFIX "test_control");
  thread_attr_set_detached (&attr);
  error = thread_create (NULL, &attr, test_control, runner);
  error_check (error, "thread_create");

  cpumap_destroy (cpumap);
  return (TEST_RUNNING);
}