thread.h 19 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814
  1. /*
  2. * Copyright (c) 2012-2018 Richard Braun.
  3. *
  4. * This program is free software: you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation, either version 3 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  12. * GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program. If not, see <http://www.gnu.org/licenses/>.
  16. *
  17. *
  18. * The thread module aims at providing an interface suitable to implement
  19. * POSIX scheduling policies. As such, it provides scheduling classes and
  20. * policies that closely match the standard ones. The "real-time" policies
  21. * (FIFO and RR) directly map the first-in first-out (SCHED_FIFO) and
  22. * round-robin (SCHED_RR) policies, while the "fair-scheduling" policy (FS)
  23. * can be used for the normal SCHED_OTHER policy. The idle policy is reserved
  24. * for idling kernel threads.
  25. *
  26. * By convention, the name of a kernel thread is built by concatenating the
  27. * kernel name and the name of the start function, separated with an underscore.
  28. * Threads that are bound to a processor also include the "/cpu_id" suffix.
  29. * For example, "x15_thread_balance/1" is the name of the inter-processor
  30. * balancer thread of the second processor.
  31. */
  32. #ifndef KERN_THREAD_H
  33. #define KERN_THREAD_H
  34. #include <assert.h>
  35. #include <stdbool.h>
  36. #include <stddef.h>
  37. #include <stdint.h>
  38. #include <stdnoreturn.h>
  39. #include <kern/atomic.h>
  40. #include <kern/init.h>
  41. #include <kern/cpumap.h>
  42. #include <kern/kernel.h>
  43. #include <kern/macros.h>
  44. #include <kern/spinlock_types.h>
  45. #include <kern/turnstile_types.h>
  46. #include <machine/cpu.h>
  47. #include <machine/tcb.h>
/*
 * Thread structure.
 *
 * Opaque at this point; the full definition is provided by
 * <kern/thread_i.h>, included further below.
 */
struct thread;

/*
 * Scheduling parameters of a thread.
 *
 * The global priority of a thread is meant to be compared against
 * another global priority to determine which thread has higher priority.
 */
struct thread_sched_data {
    unsigned char sched_policy;     /* One of THREAD_SCHED_POLICY_x */
    unsigned char sched_class;      /* Scheduling class matching the policy */
    unsigned short priority;        /* Priority within the scheduling class */
    unsigned int global_priority;   /* Class-independent comparison key */
};
/*
 * Thread name buffer size.
 */
#define THREAD_NAME_SIZE 32

/*
 * The private thread structure relies on the declarations above,
 * hence the mid-file inclusion.
 */
#include <kern/thread_i.h>

/* Prefix of all kernel thread names (see the naming convention above) */
#define THREAD_KERNEL_PREFIX KERNEL_NAME "_"

/*
 * Thread states.
 */
#define THREAD_RUNNING 0
#define THREAD_SLEEPING 1
#define THREAD_DEAD 2
#define THREAD_SUSPENDED 3

/*
 * Scheduling policies.
 *
 * The idle policy is reserved for the per-CPU idle threads.
 */
#define THREAD_SCHED_POLICY_FIFO 0
#define THREAD_SCHED_POLICY_RR 1
#define THREAD_SCHED_POLICY_FS 2
#define THREAD_SCHED_POLICY_IDLE 3
#define THREAD_NR_SCHED_POLICIES 4

/*
 * Real-time priority properties (FIFO and RR policies).
 */
#define THREAD_SCHED_RT_PRIO_MIN 0
#define THREAD_SCHED_RT_PRIO_MAX 31

/*
 * Fair-scheduling priority properties (FS policy).
 */
#define THREAD_SCHED_FS_PRIO_MIN 0
#define THREAD_SCHED_FS_PRIO_DEFAULT 20
#define THREAD_SCHED_FS_PRIO_MAX 39
/*
 * Thread creation attributes.
 *
 * See thread_attr_init() for the default values, and thread_create()
 * for how NULL members are handled.
 */
struct thread_attr {
    const char *name;           /* Thread name, must not be NULL */
    unsigned long flags;        /* Mask of THREAD_ATTR_x flags */
    struct cpumap *cpumap;      /* Processor affinity, NULL for none */
    struct task *task;          /* Owning task, NULL to inherit from caller */
    unsigned char policy;       /* One of THREAD_SCHED_POLICY_x */
    unsigned short priority;    /* Priority, interpretation depends on policy */
};
  107. /*
  108. * Initialize thread creation attributes with default values.
  109. *
  110. * It is guaranteed that these default values include :
  111. * - thread is joinable
  112. * - no processor affinity
  113. * - task is inherited from parent thread
  114. * - policy is fair-scheduling
  115. * - priority is fair-scheduling default
  116. *
  117. * If the policy is changed, the priority, if applicable, must be updated
  118. * as well.
  119. */
  120. static inline void
  121. thread_attr_init(struct thread_attr *attr, const char *name)
  122. {
  123. attr->name = name;
  124. attr->flags = 0;
  125. attr->cpumap = NULL;
  126. attr->task = NULL;
  127. attr->policy = THREAD_SCHED_POLICY_FS;
  128. attr->priority = THREAD_SCHED_FS_PRIO_DEFAULT;
  129. }
/*
 * Request creation of a detached (non-joinable) thread.
 */
static inline void
thread_attr_set_detached(struct thread_attr *attr)
{
    attr->flags |= THREAD_ATTR_DETACHED;
}
/*
 * Set the processor affinity of the thread to create.
 */
static inline void
thread_attr_set_cpumap(struct thread_attr *attr, struct cpumap *cpumap)
{
    attr->cpumap = cpumap;
}
/*
 * Set the task the thread to create belongs to.
 */
static inline void
thread_attr_set_task(struct thread_attr *attr, struct task *task)
{
    attr->task = task;
}
/*
 * Set the scheduling policy (THREAD_SCHED_POLICY_x) of the thread to create.
 *
 * The priority attribute must be kept consistent with the policy.
 */
static inline void
thread_attr_set_policy(struct thread_attr *attr, unsigned char policy)
{
    attr->policy = policy;
}
/*
 * Set the priority of the thread to create, in the range matching the
 * selected scheduling policy.
 */
static inline void
thread_attr_set_priority(struct thread_attr *attr, unsigned short priority)
{
    attr->priority = priority;
}
/*
 * Thread entry point.
 *
 * Loaded TCBs are expected to call this function with interrupts disabled.
 */
void thread_main(void (*fn)(void *), void *arg);

/*
 * Initialization of the thread module on APs (application processors).
 */
void thread_ap_setup(void);

/*
 * Create a thread.
 *
 * Creation attributes must be passed, but some of them may be NULL, in which
 * case the value is inherited from the caller. The name attribute must not be
 * NULL.
 *
 * If successful, and if the caller passed a non-NULL thread pointer, it is
 * filled with the address of the newly created thread.
 */
int thread_create(struct thread **threadp, const struct thread_attr *attr,
                  void (*fn)(void *), void *arg);

/*
 * Terminate the calling thread. Never returns.
 */
noreturn void thread_exit(void);

/*
 * Wait for the given thread to terminate and release its resources.
 */
void thread_join(struct thread *thread);

/*
 * Make the current thread sleep while waiting for an event.
 *
 * The interlock is used to synchronize the thread state with respect to
 * wake-ups, i.e. a wake-up request sent by another thread cannot be missed
 * if that thread is holding the interlock.
 *
 * As a special exception, threads that use preemption as a synchronization
 * mechanism can omit the interlock and pass a NULL pointer instead.
 * In any case, the preemption nesting level must strictly be one when calling
 * this function.
 *
 * The wait channel describes the reason why the thread is sleeping. The
 * address should refer to a relevant synchronization object, normally
 * containing the interlock, but not necessarily.
 *
 * When bounding the duration of the sleep, the caller must pass an absolute
 * time in ticks, and ETIMEDOUT is returned if that time is reached before
 * the thread is awoken.
 *
 * Implies a memory barrier.
 */
void thread_sleep(struct spinlock *interlock, const void *wchan_addr,
                  const char *wchan_desc);
int thread_timedsleep(struct spinlock *interlock, const void *wchan_addr,
                      const char *wchan_desc, uint64_t ticks);

/*
 * Schedule a thread for execution on a processor.
 *
 * If the target thread is NULL, the calling thread, or already in the
 * running state, or in the suspended state, no action is performed and
 * EINVAL is returned.
 *
 * TODO Describe memory ordering with regard to thread_sleep().
 */
int thread_wakeup(struct thread *thread);

/*
 * Suspend a thread.
 *
 * A suspended thread may only be resumed by calling thread_resume().
 *
 * This operation is asynchronous, i.e. the caller must not expect the target
 * thread to be suspended on return.
 *
 * If attempting to suspend core system threads, or threads in the dead state,
 * or if the given thread is NULL, the request is ignored and EINVAL is
 * returned. If the target thread is already suspended, the call turns into
 * a no-op and merely returns success.
 */
int thread_suspend(struct thread *thread);

/*
 * Resume a thread.
 *
 * This call is equivalent to thread_wakeup(), with the exception that
 * it may also wake up suspended threads.
 */
int thread_resume(struct thread *thread);

/*
 * Suspend execution of the calling thread for the given duration.
 *
 * NOTE(review): ticks appears to be an absolute time when `absolute` is
 * true, relative otherwise — confirm against the implementation.
 */
void thread_delay(uint64_t ticks, bool absolute);

/*
 * Start running threads on the local processor. Never returns.
 *
 * Interrupts must be disabled when calling this function.
 */
noreturn void thread_run_scheduler(void);

/*
 * Make the calling thread release the processor.
 *
 * This call does nothing if preemption is disabled, or the scheduler
 * determines the caller should continue to run (e.g. it's currently the only
 * runnable thread).
 *
 * Implies a full memory barrier if a context switch occurred.
 */
void thread_yield(void);

/*
 * Report a scheduling interrupt from a remote processor.
 */
void thread_schedule_intr(void);

/*
 * Report a periodic event on the current processor.
 *
 * Interrupts and preemption must be disabled when calling this function.
 */
void thread_report_periodic_event(void);

/*
 * Set thread scheduling parameters.
 */
void thread_setscheduler(struct thread *thread, unsigned char policy,
                         unsigned short priority);

/*
 * Variant used for priority inheritance.
 *
 * The caller must hold the turnstile thread data lock and no turnstile
 * locks when calling this function.
 */
void thread_pi_setscheduler(struct thread *thread, unsigned char policy,
                            unsigned short priority);
/*
 * Acquire a reference on a thread.
 *
 * Relaxed ordering is sufficient for taking a reference; visibility of the
 * referenced object is expected to be established by the means through
 * which the pointer was obtained.
 */
static inline void
thread_ref(struct thread *thread)
{
    unsigned long nr_refs;

    /* fetch_add returns the previous value */
    nr_refs = atomic_fetch_add(&thread->nr_refs, 1UL, ATOMIC_RELAXED);
    assert(nr_refs != (unsigned long)-1);   /* catch counter overflow */
}
/*
 * Release a reference on a thread.
 *
 * When the last reference is released, the thread is passed to
 * thread_terminate() for destruction.
 */
static inline void
thread_unref(struct thread *thread)
{
    unsigned long nr_refs;

    /*
     * Acquire-release ordering: release makes prior accesses by this
     * releaser visible, acquire makes them visible to the final releaser
     * that performs the destruction.
     */
    nr_refs = atomic_fetch_sub(&thread->nr_refs, 1UL, ATOMIC_ACQ_REL);
    assert(nr_refs != 0);   /* catch underflow, i.e. double unref */

    /* fetch_sub returns the previous value: 1 means this was the last ref */
    if (nr_refs == 1) {
        thread_terminate(thread);
    }
}
/*
 * Return the wait channel address of a thread, i.e. the address of the
 * synchronization object it is sleeping on (see thread_sleep()).
 */
static inline const void *
thread_wchan_addr(const struct thread *thread)
{
    return thread->wchan_addr;
}
/*
 * Return the wait channel description of a thread (see thread_sleep()).
 */
static inline const char *
thread_wchan_desc(const struct thread *thread)
{
    return thread->wchan_desc;
}
/*
 * Return a character representation of the state of a thread.
 */
char thread_state_to_chr(unsigned int state);

/*
 * Return the user (i.e. caller-requested) scheduling data of a thread.
 */
static inline const struct thread_sched_data *
thread_get_user_sched_data(const struct thread *thread)
{
    return &thread->user_sched_data;
}

/*
 * Return the real (i.e. currently effective, possibly boosted by priority
 * inheritance) scheduling data of a thread.
 */
static inline const struct thread_sched_data *
thread_get_real_sched_data(const struct thread *thread)
{
    return &thread->real_sched_data;
}
/*
 * Scheduling data accessors.
 *
 * If the caller requires the scheduling data to be stable, it
 * must lock one of the following objects :
 *  - the containing run queue
 *  - the per-thread turnstile data (turnstile_td)
 *
 * Both are locked when scheduling data are updated.
 */
static inline unsigned char
thread_user_sched_policy(const struct thread *thread)
{
    return thread_get_user_sched_data(thread)->sched_policy;
}

static inline unsigned char
thread_user_sched_class(const struct thread *thread)
{
    return thread_get_user_sched_data(thread)->sched_class;
}

static inline unsigned short
thread_user_priority(const struct thread *thread)
{
    return thread_get_user_sched_data(thread)->priority;
}

static inline unsigned int
thread_user_global_priority(const struct thread *thread)
{
    return thread_get_user_sched_data(thread)->global_priority;
}

static inline unsigned char
thread_real_sched_policy(const struct thread *thread)
{
    return thread_get_real_sched_data(thread)->sched_policy;
}

static inline unsigned char
thread_real_sched_class(const struct thread *thread)
{
    return thread_get_real_sched_data(thread)->sched_class;
}

static inline unsigned short
thread_real_priority(const struct thread *thread)
{
    return thread_get_real_sched_data(thread)->priority;
}

static inline unsigned int
thread_real_global_priority(const struct thread *thread)
{
    return thread_get_real_sched_data(thread)->global_priority;
}
/*
 * Return a string representation of the scheduling class of a thread.
 */
const char * thread_sched_class_to_str(unsigned char sched_class);

/*
 * Return the TCB (thread control block) embedded in a thread.
 */
static inline struct tcb *
thread_get_tcb(struct thread *thread)
{
    return &thread->tcb;
}

/*
 * Return the thread containing the given TCB.
 */
static inline struct thread *
thread_from_tcb(struct tcb *tcb)
{
    return structof(tcb, struct thread, tcb);
}

/*
 * Return the calling thread.
 */
static inline struct thread *
thread_self(void)
{
    return thread_from_tcb(tcb_current());
}
/*
 * Main scheduler invocation call.
 *
 * Called on return from interrupt or when reenabling preemption.
 */
void thread_schedule(void);
  399. /*
  400. * Sleep queue lending functions.
  401. */
  402. static inline struct sleepq *
  403. thread_sleepq_lend(void)
  404. {
  405. struct sleepq *sleepq;
  406. sleepq = thread_self()->priv_sleepq;
  407. assert(sleepq != NULL);
  408. thread_self()->priv_sleepq = NULL;
  409. return sleepq;
  410. }
  411. static inline void
  412. thread_sleepq_return(struct sleepq *sleepq)
  413. {
  414. assert(sleepq != NULL);
  415. assert(thread_self()->priv_sleepq == NULL);
  416. thread_self()->priv_sleepq = sleepq;
  417. }
  418. /*
  419. * Turnstile lending functions.
  420. */
  421. static inline struct turnstile *
  422. thread_turnstile_lend(void)
  423. {
  424. struct turnstile *turnstile;
  425. turnstile = thread_self()->priv_turnstile;
  426. assert(turnstile != NULL);
  427. thread_self()->priv_turnstile = NULL;
  428. return turnstile;
  429. }
  430. static inline void
  431. thread_turnstile_return(struct turnstile *turnstile)
  432. {
  433. assert(turnstile != NULL);
  434. assert(thread_self()->priv_turnstile == NULL);
  435. thread_self()->priv_turnstile = turnstile;
  436. }
/*
 * Return the per-thread turnstile data of a thread.
 */
static inline struct turnstile_td *
thread_turnstile_td(struct thread *thread)
{
    return &thread->turnstile_td;
}
/*
 * Priority propagation functions.
 */
static inline bool
thread_priority_propagation_needed(void)
{
    return thread_self()->propagate_priority;
}

/*
 * Flag the calling thread as requiring priority propagation, performed
 * by thread_propagate_priority() once preemption is reenabled (see
 * thread_preempt_enable()).
 */
static inline void
thread_set_priority_propagation_needed(void)
{
    thread_self()->propagate_priority = true;
}

void thread_propagate_priority(void);
/*
 * Migration control functions.
 *
 * Functions that change the migration state are implicit compiler barriers.
 */
static inline int
thread_pinned(void)
{
    return thread_self()->pin_level != 0;
}
  466. static inline void
  467. thread_pin(void)
  468. {
  469. struct thread *thread;
  470. thread = thread_self();
  471. thread->pin_level++;
  472. assert(thread->pin_level != 0);
  473. barrier();
  474. }
  475. static inline void
  476. thread_unpin(void)
  477. {
  478. struct thread *thread;
  479. barrier();
  480. thread = thread_self();
  481. assert(thread->pin_level != 0);
  482. thread->pin_level--;
  483. }
/*
 * Preemption control functions.
 *
 * Functions that change the preemption state are implicit compiler barriers.
 */
static inline int
thread_preempt_enabled(void)
{
    return thread_self()->preempt_level == 0;
}
  494. static inline void
  495. thread_preempt_disable(void)
  496. {
  497. struct thread *thread;
  498. thread = thread_self();
  499. thread->preempt_level++;
  500. assert(thread->preempt_level != 0);
  501. barrier();
  502. }
  503. static inline void
  504. thread_preempt_enable_no_resched(void)
  505. {
  506. struct thread *thread;
  507. barrier();
  508. thread = thread_self();
  509. assert(thread->preempt_level != 0);
  510. thread->preempt_level--;
  511. /*
  512. * Don't perform priority propagation here, because this function is
  513. * called on return from interrupt, where the transient state may
  514. * incorrectly trigger it.
  515. */
  516. }
/*
 * Remove one level of preemption disabling, performing any pending
 * priority propagation and then invoking the scheduler.
 */
static inline void
thread_preempt_enable(void)
{
    thread_preempt_enable_no_resched();

    /*
     * Propagate priority before scheduling, so that the scheduler sees
     * up-to-date scheduling data. Only done once fully preemptible.
     */
    if (thread_priority_propagation_needed()
        && thread_preempt_enabled()) {
        thread_propagate_priority();
    }

    thread_schedule();
}
/*
 * Disable preemption, then disable interrupts, saving the previous
 * interrupt state in *flags for thread_preempt_enable_intr_restore().
 */
static inline void
thread_preempt_disable_intr_save(unsigned long *flags)
{
    /* Preemption first, so the thread can't migrate while saving flags */
    thread_preempt_disable();
    cpu_intr_save(flags);
}
/*
 * Restore the interrupt state saved by thread_preempt_disable_intr_save(),
 * then reenable preemption, in the reverse order of acquisition.
 */
static inline void
thread_preempt_enable_intr_restore(unsigned long flags)
{
    cpu_intr_restore(flags);
    thread_preempt_enable();
}
/*
 * Interrupt level control functions.
 *
 * Functions that change the interrupt level are implicit compiler barriers.
 */
static inline bool
thread_interrupted(void)
{
    return thread_self()->intr_level != 0;
}
  549. static inline bool
  550. thread_check_intr_context(void)
  551. {
  552. return thread_interrupted()
  553. && !cpu_intr_enabled()
  554. && !thread_preempt_enabled();
  555. }
  556. static inline void
  557. thread_intr_enter(void)
  558. {
  559. struct thread *thread;
  560. thread = thread_self();
  561. if (thread->intr_level == 0) {
  562. thread_preempt_disable();
  563. }
  564. thread->intr_level++;
  565. assert(thread->intr_level != 0);
  566. barrier();
  567. }
  568. static inline void
  569. thread_intr_leave(void)
  570. {
  571. struct thread *thread;
  572. barrier();
  573. thread = thread_self();
  574. assert(thread->intr_level != 0);
  575. thread->intr_level--;
  576. if (thread->intr_level == 0) {
  577. thread_preempt_enable_no_resched();
  578. }
  579. }
/*
 * RCU functions.
 *
 * Return the RCU reader state embedded in a thread.
 */
static inline struct rcu_reader *
thread_rcu_reader(struct thread *thread)
{
    return &thread->rcu_reader;
}
/*
 * Thread-specific data (TSD) functions, only built when the configuration
 * provides a non-zero number of TSD keys.
 */
#if CONFIG_THREAD_MAX_TSD_KEYS != 0

/*
 * Type for thread-specific data destructor.
 */
typedef void (*thread_tsd_dtor_fn_t)(void *);

/*
 * Allocate a TSD key.
 *
 * If not NULL, the destructor is called on thread destruction on the pointer
 * associated with the allocated key.
 */
void thread_key_create(unsigned int *keyp, thread_tsd_dtor_fn_t dtor);

/*
 * Set the pointer associated with a key for the given thread.
 *
 * The key must have been obtained from thread_key_create(); no bounds
 * check is performed here.
 */
static inline void
thread_tsd_set(struct thread *thread, unsigned int key, void *ptr)
{
    thread->tsd[key] = ptr;
}

/*
 * Return the pointer associated with a key for the given thread.
 */
static inline void *
thread_tsd_get(struct thread *thread, unsigned int key)
{
    return thread->tsd[key];
}

/*
 * Set the pointer associated with a key for the calling thread.
 */
static inline void
thread_set_specific(unsigned int key, void *ptr)
{
    thread_tsd_set(thread_self(), key, ptr);
}

/*
 * Return the pointer associated with a key for the calling thread.
 */
static inline void *
thread_get_specific(unsigned int key)
{
    return thread_tsd_get(thread_self(), key);
}

#endif /* CONFIG_THREAD_MAX_TSD_KEYS != 0 */
/*
 * Return the name of a thread (see the naming convention above).
 */
static inline const char *
thread_name(const struct thread *thread)
{
    return thread->name;
}
#ifdef CONFIG_PERFMON
/*
 * Return the per-thread performance monitoring data.
 */
static inline struct perfmon_td *
thread_get_perfmon_td(struct thread *thread)
{
    return &thread->perfmon_td;
}
#endif /* CONFIG_PERFMON */
/*
 * Return the last CPU on which the thread has been scheduled.
 *
 * This call isn't synchronized, and the caller may obtain an outdated value.
 */
unsigned int thread_cpu(const struct thread *thread);

/*
 * Return the current state (THREAD_x) of the given thread.
 *
 * This call isn't synchronized, and the caller may obtain an outdated value.
 */
unsigned int thread_state(const struct thread *thread);

/*
 * Return true if the given thread is running.
 *
 * This call isn't synchronized, and the caller may obtain an outdated value.
 */
bool thread_is_running(const struct thread *thread);

/*
 * Get the CPU affinity mask of the specified thread.
 */
int thread_get_affinity(const struct thread *thread, struct cpumap *cpumap);

/*
 * Set the CPU affinity mask for the specified thread.
 */
int thread_set_affinity(struct thread *thread, const struct cpumap *cpumap);

/*
 * This init operation provides :
 *  - a dummy thread context for the BSP, allowing the use of thread_self()
 */
INIT_OP_DECLARE(thread_setup_booter);

/*
 * This init operation provides :
 *  - same as thread_setup_booter
 *  - BSP run queue initialization
 */
INIT_OP_DECLARE(thread_bootstrap);

/*
 * This init operation provides :
 *  - thread creation
 *  - module fully initialized
 */
INIT_OP_DECLARE(thread_setup);

#endif /* KERN_THREAD_H */