init.c

/*
 * Copyright (c) 2017 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * The main purpose of the init module is to provide a convenient interface
 * to declare initialization operations and their dependencies, infer an
 * order of execution based on the resulting graph (which must be acyclic),
 * and run the operations in that order. Topological sorting is achieved
 * using Kahn's algorithm.
 */
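/*
 * As a rough usage sketch of this interface, a subsystem declares its
 * initialization function together with the operations it depends on,
 * using the INIT_OP_* macros from kern/init.h. The function names below
 * are hypothetical and the exact macro signatures are assumed for
 * illustration only; refer to kern/init.h for the authoritative
 * definitions:
 *
 *   static int __init
 *   foo_setup(void)
 *   {
 *       return 0;
 *   }
 *
 *   INIT_OP_DEFINE(foo_setup,
 *                  INIT_OP_DEP(bar_setup, true),
 *                  INIT_OP_DEP(baz_setup, false));
 *
 * The second INIT_OP_DEP argument is presumably the "required" flag
 * checked by init_op_ready() below: a failed required dependency prevents
 * the dependent operation from running.
 */
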
#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <kern/init.h>
#include <kern/slist.h>
#include <kern/macros.h>
#include <machine/cpu.h>

extern struct init_op _init_ops;
extern struct init_op _init_ops_end;

static_assert(sizeof(struct init_op) == INIT_OP_ALIGN, "invalid init_op size");

/*
 * List of initialization operations.
 *
 * This type is used for the output of the topological sort.
 */
struct init_ops_list {
    struct slist ops;
    size_t size;
};

static void __init
init_ops_list_init(struct init_ops_list *list)
{
    slist_init(&list->ops);
    list->size = 0;
}

static size_t __init
init_ops_list_size(const struct init_ops_list *list)
{
    return list->size;
}

static void __init
init_ops_list_push(struct init_ops_list *list, struct init_op *op)
{
    slist_insert_head(&list->ops, &op->list_node);
    list->size++;
}

static struct init_op * __init
init_ops_list_pop(struct init_ops_list *list)
{
    struct init_op *op;

    if (list->size == 0) {
        return NULL;
    }

    op = slist_first_entry(&list->ops, struct init_op, list_node);
    slist_remove(&list->ops, NULL);
    list->size--;
    return op;
}

/*
 * Stack of initialization operations.
 *
 * This type is used internally by the topological sort algorithm.
 */
struct init_ops_stack {
    struct slist ops;
};

static void __init
init_ops_stack_init(struct init_ops_stack *stack)
{
    slist_init(&stack->ops);
}

static void __init
init_ops_stack_push(struct init_ops_stack *stack, struct init_op *op)
{
    slist_insert_head(&stack->ops, &op->stack_node);
}

static struct init_op * __init
init_ops_stack_pop(struct init_ops_stack *stack)
{
    struct init_op *op;

    if (slist_empty(&stack->ops)) {
        return NULL;
    }

    op = slist_first_entry(&stack->ops, struct init_op, stack_node);
    slist_remove(&stack->ops, NULL);
    return op;
}

static struct init_op_dep * __init
init_op_get_dep(const struct init_op *op, size_t index)
{
    assert(index < op->nr_deps);
    return &op->deps[index];
}

static void __init
init_op_init(struct init_op *op)
{
    struct init_op_dep *dep;

    for (size_t i = 0; i < op->nr_deps; i++) {
        dep = init_op_get_dep(op, i);
        dep->op->nr_parents++;
        assert(dep->op->nr_parents != 0);
    }
}

static bool __init
init_op_orphan(const struct init_op *op)
{
    return (op->nr_parents == 0);
}

static bool __init
init_op_pending(const struct init_op *op)
{
    return op->state == INIT_OP_STATE_PENDING;
}

static void __init
init_op_set_pending(struct init_op *op)
{
    assert(op->state == INIT_OP_STATE_UNLINKED);
    op->state = INIT_OP_STATE_PENDING;
}

static bool __init
init_op_complete(const struct init_op *op)
{
    return op->state == INIT_OP_STATE_COMPLETE;
}

static void __init
init_op_set_complete(struct init_op *op)
{
    assert(init_op_pending(op));
    op->state = INIT_OP_STATE_COMPLETE;
}

static bool __init
init_op_ready(const struct init_op *op)
{
    const struct init_op_dep *dep;
    size_t i;

    for (i = 0; i < op->nr_deps; i++) {
        dep = init_op_get_dep(op, i);

        if (!init_op_complete(dep->op)
            || (dep->required && dep->op->error)) {
            return false;
        }
    }

    return true;
}

static void __init
init_op_run(struct init_op *op)
{
    if (init_op_ready(op)) {
        op->error = op->fn();
    }

    init_op_set_complete(op);
}

#ifdef CONFIG_INIT_DEBUG

#define INIT_DEBUG_LOG_BUFFER_SIZE 8192

struct init_debug_log {
    char buffer[INIT_DEBUG_LOG_BUFFER_SIZE];
    size_t index;
};

/*
 * Buffers used to store an easy-to-dump text representation of init operations.
 *
 * These buffers are meant to be retrieved through a debugging interface such
 * as JTAG.
 */
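/*
 * For example, with a debugger attached through JTAG, the buffers can be
 * dumped by name, e.g. from a GDB session (an illustrative workflow, the
 * details depend on the debugger):
 *
 *   (gdb) print init_debug_logs.pending.buffer
 *   (gdb) print init_debug_logs.complete.buffer
 */
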
struct init_debug_logs {
    struct init_debug_log roots;        /* graph roots */
    struct init_debug_log cycles;       /* operations with dependency cycles */
    struct init_debug_log pending;      /* operations successfully sorted */
    struct init_debug_log complete;     /* executed operations */
};

static struct init_debug_logs init_debug_logs;

static void __init
init_debug_log_append(struct init_debug_log *log, const char *name)
{
    size_t size;

    if (log->index == sizeof(log->buffer)) {
        return;
    }

    size = sizeof(log->buffer) - log->index;
    log->index += snprintf(log->buffer + log->index, size, "%s ", name);

    if (log->index >= sizeof(log->buffer)) {
        log->index = sizeof(log->buffer);
    }
}

static void __init
init_debug_append_root(const struct init_op *op)
{
    init_debug_log_append(&init_debug_logs.roots, op->name);
}

static void __init
init_debug_append_cycle(const struct init_op *op)
{
    init_debug_log_append(&init_debug_logs.cycles, op->name);
}

static void __init
init_debug_append_pending(const struct init_op *op)
{
    init_debug_log_append(&init_debug_logs.pending, op->name);
}

static void __init
init_debug_append_complete(const struct init_op *op)
{
    init_debug_log_append(&init_debug_logs.complete, op->name);
}

static void __init
init_debug_scan_not_pending(void)
{
    const struct init_op *op;

    for (op = &_init_ops; op < &_init_ops_end; op++) {
        if (!init_op_pending(op)) {
            init_debug_append_cycle(op);
        }
    }
}

#else /* CONFIG_INIT_DEBUG */

#define init_debug_append_root(roots)
#define init_debug_append_pending(op)
#define init_debug_append_complete(op)
#define init_debug_scan_not_pending()

#endif /* CONFIG_INIT_DEBUG */

static void __init
init_add_pending_op(struct init_ops_list *pending_ops, struct init_op *op)
{
    assert(!init_op_pending(op));
    init_op_set_pending(op);
    init_ops_list_push(pending_ops, op);
    init_debug_append_pending(op);
}

static void __init
init_op_visit(struct init_op *op, struct init_ops_stack *stack)
{
    struct init_op_dep *dep;

    for (size_t i = 0; i < op->nr_deps; i++) {
        dep = init_op_get_dep(op, i);
        assert(dep->op->nr_parents != 0);
        dep->op->nr_parents--;

        if (init_op_orphan(dep->op)) {
            init_ops_stack_push(stack, dep->op);
        }
    }
}

static void __init
init_check_ops_alignment(void)
{
    uintptr_t start, end;

    start = (uintptr_t)&_init_ops;
    end = (uintptr_t)&_init_ops_end;

    if (((end - start) % INIT_OP_ALIGN) != 0) {
        cpu_halt();
    }
}

static void __init
init_bootstrap(void)
{
    struct init_op *op;

    init_check_ops_alignment();

    for (op = &_init_ops; op < &_init_ops_end; op++) {
        init_op_init(op);
    }
}

static void __init
init_scan_roots(struct init_ops_stack *stack)
{
    struct init_op *op;

    init_ops_stack_init(stack);

    for (op = &_init_ops; op < &_init_ops_end; op++) {
        if (init_op_orphan(op)) {
            init_ops_stack_push(stack, op);
            init_debug_append_root(op);
        }
    }
}

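/*
 * Core of Kahn's algorithm: operations that no other operation depends on
 * (the graph roots) are pushed on a stack, and visiting an operation
 * decrements the parent count of each of its dependencies, exposing new
 * roots. Because the pending list is both filled and drained at its head,
 * operations are later run in reverse insertion order, so dependencies
 * complete before their dependents. If the number of sorted operations
 * doesn't match the total number of operations, the dependency graph
 * contains a cycle and the processor is halted.
 */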
static void __init
init_scan_ops(struct init_ops_list *pending_ops)
{
    struct init_ops_stack stack;
    struct init_op *op;
    size_t nr_ops;

    init_scan_roots(&stack);

    for (;;) {
        op = init_ops_stack_pop(&stack);

        if (op == NULL) {
            break;
        }

        init_add_pending_op(pending_ops, op);
        init_op_visit(op, &stack);
    }

    init_debug_scan_not_pending();

    nr_ops = &_init_ops_end - &_init_ops;

    if (init_ops_list_size(pending_ops) != nr_ops) {
        cpu_halt();
    }
}

static void __init
init_run_ops(struct init_ops_list *pending_ops)
{
    struct init_op *op;

    for (;;) {
        op = init_ops_list_pop(pending_ops);

        if (op == NULL) {
            break;
        }

        init_op_run(op);
        init_debug_append_complete(op);
    }
}

void __init
init_setup(void)
{
    struct init_ops_list pending_ops;

    init_ops_list_init(&pending_ops);
    init_bootstrap();
    init_scan_ops(&pending_ops);
    init_run_ops(&pending_ops);
}