/*
 * Copyright (c) 2012-2018 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 *
 * The thread module aims at providing an interface suitable to implement
 * POSIX scheduling policies. As such, it provides scheduling classes and
 * policies that closely match the standard ones. The "real-time" policies
 * (FIFO and RR) directly map to the first-in first-out (SCHED_FIFO) and
 * round-robin (SCHED_RR) policies, while the "fair-scheduling" policy (FS)
 * can be used for the normal SCHED_OTHER policy. The idle policy is reserved
 * for idling kernel threads.
 *
 * By convention, the name of a kernel thread is built by concatenating the
 * kernel name and the name of the start function, separated with an underscore.
 * Threads that are bound to a processor also include the "/cpu_id" suffix.
 * For example, "x15_thread_balance/1" is the name of the inter-processor
 * balancer thread of the second processor.
 */
#ifndef KERN_THREAD_H
#define KERN_THREAD_H

#include <assert.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdnoreturn.h>

#include <kern/atomic.h>
#include <kern/bulletin.h>
#include <kern/cpumap.h>
#include <kern/futex.h>
#include <kern/init.h>
#include <kern/kernel.h>
#include <kern/kuid.h>
#include <kern/list_types.h>
#include <kern/macros.h>
#include <kern/perfmon_types.h>
#include <kern/rcu_types.h>
#include <kern/spinlock_types.h>
#include <kern/turnstile_types.h>
#include <kern/types.h>
#include <kern/unwind.h>

#include <machine/cpu.h>
#include <machine/tcb.h>
/*
 * The global priority of a thread is meant to be compared against
 * another global priority to determine which thread has higher priority.
 */
struct thread_sched_data
{
  uint8_t sched_policy;
  uint8_t sched_class;
  uint16_t priority;
  uint32_t global_priority;
};
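
/*
 * Illustrative sketch (not part of the API): deciding which of two threads
 * has higher priority by comparing global priorities, using the
 * thread_real_global_priority() accessor declared later in this header.
 *
 *   static bool
 *   thread_a_has_higher_prio (const struct thread *a, const struct thread *b)
 *   {
 *     return (thread_real_global_priority (a) >
 *             thread_real_global_priority (b));
 *   }
 */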
// Thread name buffer size.
#define THREAD_NAME_SIZE   32

// Forward declarations.
struct sleepq;
struct thread_runq;
struct thread_fs_runq;

// Thread flags.
#define THREAD_YIELD      0x1UL   // Must yield the processor ASAP.
#define THREAD_DETACHED   0x2UL   // Resources automatically released on exit.

// Scheduling data for a real-time thread.
struct thread_rt_data
{
  struct list node;
  uint16_t time_slice;
};

// Scheduling data for a fair-scheduling thread.
struct thread_fs_data
{
  struct list group_node;
  struct list runq_node;
  struct thread_fs_runq *fs_runq;
  size_t round;
  uint16_t weight;
  uint16_t work;
};

// Per-thread data used by the pmap module.
struct thread_pmap_data
{
  uint64_t prev;
  void *pte;
  uintptr_t va;
};

#define THREAD_NR_PMAP_DATA   2
/*
 * Thread structure.
 *
 * Threads don't have their own lock. Instead, the associated run queue
 * lock is used for synchronization. A number of members are thread-local
 * and require no synchronization. Others must be accessed with atomic
 * instructions.
 *
 * Locking keys :
 * (r) run queue
 * (t) turnstile_td
 * (T) task
 * (j) join_lock
 * (a) atomic
 * (-) thread-local
 * ( ) read-only
 *
 * (*) The runq member is used to determine which run queue lock must be
 * held to serialize access to the relevant members. However, it is only
 * updated while the associated run queue is locked. As a result, atomic
 * reads are only necessary outside critical sections.
 */
struct thread
{
  __cacheline_aligned struct tcb tcb;   // (r)

  struct kuid_head kuid;   // (a)
  unsigned long flags;     // (a)

  // Sleep/wake-up synchronization members.
  struct thread_runq *runq;   // (r,*)
  const void *wchan_addr;     // (r)
  const char *wchan_desc;     // (r)
  int wakeup_error;           // (r)
  uint32_t state;             // (a,r)
  bool in_runq;               // (r)

  // Sleep queue available for lending.
  struct sleepq *priv_sleepq;   // (-)

  // Turnstile available for lending.
  struct turnstile *priv_turnstile;   // (-)
  struct turnstile_td turnstile_td;   // (t)

  // Preemption level, preemption is enabled if 0.
  uint16_t preempt_level;   // (-)

  // Pin level, migration is allowed if 0.
  uint16_t pin_level;   // (-)

  // Interrupt level, in thread context if 0.
  uint16_t intr_level;   // (-)

  // Page fault enablement level. Page faults are enabled if 0.
  uint16_t pagefault_level;   // (-)

  // RCU per-thread data.
  struct rcu_reader rcu_reader;   // (-)

  // Processors on which this thread is allowed to run.
  struct cpumap cpumap;   // (r)

  struct thread_sched_data user_sched_data;   // (r,t)
  struct thread_sched_data real_sched_data;   // (r,t)

  /*
   * True if the real scheduling data are not the user scheduling data.
   *
   * Note that it doesn't provide any information about priority inheritance.
   * A thread may be part of a priority inheritance chain without its
   * priority being boosted.
   */
  bool boosted;   // (r,t)

  // True if the thread is marked to suspend.
  bool suspend;   // (r)

  // True if priority must be propagated when preemption is reenabled.
  bool propagate_priority;   // (-)

  union
  {
    struct thread_rt_data rt_data;   // (r)
    struct thread_fs_data fs_data;   // (r)
  };

  /*
   * Members related to termination.
   *
   * The termination protocol is made of two steps :
   * 1/ The thread exits, thereby releasing its self reference, and
   *    sets its state to dead before calling the scheduler.
   * 2/ Another thread must either already be joining, or join later.
   *    When the thread reference counter drops to zero, the terminating
   *    flag is set, and the joining thread is awoken, if any. After that,
   *    the join operation polls the state until it sees the target thread
   *    as dead, and then releases its resources.
   */
  struct thread *join_waiter;   // (j)
  struct spinlock join_lock;
  bool terminating;             // (j)

  struct task *task;             // (T)
  struct list task_node;         // (T)
  void *stack;                   // (-)
  char name[THREAD_NAME_SIZE];   // (T)

#ifdef CONFIG_PERFMON
  struct perfmon_td perfmon_td;   // ( )
#endif

  struct unw_fixup_t *fixup;   // (-)
  void *cur_port;              // (-)
  struct task *xtask;          // (-)
  struct futex_td *futex_td;   // (-)
  struct bulletin dead_subs;   // ( )

  struct thread_pmap_data pmap_data[THREAD_NR_PMAP_DATA];   // (-)
};
// Thread IPC message (TODO: Move to a specific header).
struct thread_ipc_msg
{
  uint32_t size;
  int op;
  union
  {
    char name[THREAD_NAME_SIZE];
    struct
    {
      void *map;
      uint32_t size;
    } cpumap;
    int id;
  };
};

// Thread IPC operations.
enum
{
  THREAD_IPC_GET_NAME,
  THREAD_IPC_SET_NAME,
  THREAD_IPC_GET_AFFINITY,
  THREAD_IPC_SET_AFFINITY,
  THREAD_IPC_GET_ID,
};
// Thread creation flags.
#define THREAD_ATTR_DETACHED   0x1

void thread_terminate (struct thread *thread);

// Flag access functions.
static inline void
thread_set_flag (struct thread *thread, unsigned long flag)
{
  atomic_or_rel (&thread->flags, flag);
}

static inline void
thread_clear_flag (struct thread *thread, unsigned long flag)
{
  atomic_and_rel (&thread->flags, ~flag);
}

static inline int
thread_test_flag (struct thread *thread, unsigned long flag)
{
  return ((atomic_load_acq (&thread->flags) & flag) != 0);
}
#define THREAD_KERNEL_PREFIX   KERNEL_NAME "_"

// Thread states.
#define THREAD_RUNNING     0
#define THREAD_SLEEPING    1
#define THREAD_DEAD        2
#define THREAD_SUSPENDED   3

/*
 * Scheduling policies.
 *
 * The idle policy is reserved for the per-CPU idle threads.
 */
#define THREAD_SCHED_POLICY_FIFO   0
#define THREAD_SCHED_POLICY_RR     1
#define THREAD_SCHED_POLICY_FS     2
#define THREAD_SCHED_POLICY_IDLE   3
#define THREAD_NR_SCHED_POLICIES   4

// Real-time priority properties.
#define THREAD_SCHED_RT_PRIO_MIN   0
#define THREAD_SCHED_RT_PRIO_MAX   31

// Fair-scheduling priority properties.
#define THREAD_SCHED_FS_PRIO_MIN       0
#define THREAD_SCHED_FS_PRIO_DEFAULT   20
#define THREAD_SCHED_FS_PRIO_MAX       39
// Thread creation attributes.
struct thread_attr
{
  const char *name;
  size_t flags;
  struct cpumap *cpumap;
  struct task *task;
  uint8_t policy;
  uint16_t priority;
};

/*
 * Initialize thread creation attributes with default values.
 *
 * It is guaranteed that these default values include :
 * - thread is joinable
 * - no processor affinity
 * - task is inherited from parent thread
 * - policy is fair-scheduling
 * - priority is fair-scheduling default
 * - no user stack
 *
 * If the policy is changed, the priority, if applicable, must be updated
 * as well.
 */
static inline void
thread_attr_init (struct thread_attr *attr, const char *name)
{
  attr->name = name;
  attr->flags = 0;
  attr->cpumap = NULL;
  attr->task = NULL;
  attr->policy = THREAD_SCHED_POLICY_FS;
  attr->priority = THREAD_SCHED_FS_PRIO_DEFAULT;
}
static inline void
thread_attr_set_detached (struct thread_attr *attr)
{
  attr->flags |= THREAD_ATTR_DETACHED;
}

static inline void
thread_attr_set_cpumap (struct thread_attr *attr, struct cpumap *cpumap)
{
  attr->cpumap = cpumap;
}

static inline void
thread_attr_set_task (struct thread_attr *attr, struct task *task)
{
  attr->task = task;
}

static inline void
thread_attr_set_policy (struct thread_attr *attr, uint8_t policy)
{
  attr->policy = policy;
}

static inline void
thread_attr_set_priority (struct thread_attr *attr, uint16_t priority)
{
  attr->priority = priority;
}
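
/*
 * Usage sketch (illustrative; my_worker is a hypothetical start function):
 * configuring creation attributes for a detached round-robin thread. Note
 * that the priority is updated along with the policy, as required by
 * thread_attr_init().
 *
 *   struct thread_attr attr;
 *
 *   thread_attr_init (&attr, THREAD_KERNEL_PREFIX "my_worker");
 *   thread_attr_set_detached (&attr);
 *   thread_attr_set_policy (&attr, THREAD_SCHED_POLICY_RR);
 *   thread_attr_set_priority (&attr, THREAD_SCHED_RT_PRIO_MIN);
 */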
/*
 * Thread entry point.
 *
 * Loaded TCBs are expected to call this function with interrupts disabled.
 */
void thread_main (void (*fn) (void *), void *arg);

// Initialization of the thread module on APs.
void thread_ap_setup (void);

/*
 * Create a thread.
 *
 * Creation attributes must be passed, but some of them may be NULL, in which
 * case the value is inherited from the caller. The name attribute must not be
 * NULL.
 *
 * If successful, and if the caller passed a non-NULL thread pointer, it is
 * filled with the address of the newly created thread.
 */
int thread_create (struct thread **threadp, const struct thread_attr *attr,
                   void (*fn) (void *), void *arg);

// Terminate the calling thread.
noreturn void thread_exit (void);

// Wait for the given thread to terminate and release its resources.
void thread_join (struct thread *thread);
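
/*
 * Example (illustrative; my_worker is a hypothetical start function):
 * creating a joinable thread with default attributes and reclaiming its
 * resources once it has exited.
 *
 *   struct thread *thread;
 *   struct thread_attr attr;
 *
 *   thread_attr_init (&attr, THREAD_KERNEL_PREFIX "my_worker");
 *   int error = thread_create (&thread, &attr, my_worker, NULL);
 *
 *   if (! error)
 *     thread_join (thread);
 */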
/*
 * Make the current thread sleep while waiting for an event.
 *
 * The interlock is used to synchronize the thread state with respect to
 * wake-ups, i.e. a wake-up request sent by another thread cannot be missed
 * if that thread is holding the interlock.
 *
 * As a special exception, threads that use preemption as a synchronization
 * mechanism can omit the interlock and pass a NULL pointer instead.
 * In any case, the preemption level must be exactly one when calling
 * this function.
 *
 * The wait channel describes the reason why the thread is sleeping. The
 * address should refer to a relevant synchronization object, normally
 * containing the interlock, but not necessarily.
 *
 * When bounding the duration of the sleep, the caller must pass an absolute
 * time in ticks, and ETIMEDOUT is returned if that time is reached before
 * the thread is awoken.
 *
 * Implies a memory barrier.
 */
void thread_sleep (struct spinlock *interlock, const void *wchan_addr,
                   const char *wchan_desc);
int thread_timedsleep (struct spinlock *interlock, const void *wchan_addr,
                       const char *wchan_desc, uint64_t ticks);
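
/*
 * Sketch of the interlocked sleep pattern described above (illustrative;
 * obj is a hypothetical synchronization object embedding a spinlock, a
 * condition, and a pointer to the waiting thread):
 *
 *   spinlock_lock (&obj->lock);
 *
 *   while (!obj->condition)
 *     thread_sleep (&obj->lock, obj, "obj");
 *
 *   spinlock_unlock (&obj->lock);
 *
 * A waker holding the interlock cannot have its wake-up missed :
 *
 *   spinlock_lock (&obj->lock);
 *   obj->condition = true;
 *   thread_wakeup (obj->waiter);
 *   spinlock_unlock (&obj->lock);
 */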
/*
 * Schedule a thread for execution on a processor.
 *
 * If the target thread is NULL, is the calling thread, is already in the
 * running state, or is in the suspended state, no action is performed and
 * EINVAL is returned.
 */
int thread_wakeup (struct thread *thread);

/*
 * Suspend a thread.
 *
 * A suspended thread may only be resumed by calling thread_resume().
 *
 * This operation is asynchronous, i.e. the caller must not expect the target
 * thread to be suspended on return.
 *
 * If attempting to suspend a core system thread, or a thread in the dead
 * state, or if the given thread is NULL, the request is ignored and EINVAL
 * is returned. If the target thread is already suspended, the call turns
 * into a no-op and merely returns success.
 */
int thread_suspend (struct thread *thread);

/*
 * Resume a thread.
 *
 * This call is equivalent to thread_wakeup(), with the exception that
 * it may also wake up suspended threads.
 */
int thread_resume (struct thread *thread);
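
/*
 * Sketch (illustrative; target is a hypothetical thread pointer): since
 * suspension is asynchronous, a caller that must observe the target as
 * actually suspended has to poll its state, e.g. with thread_state(),
 * declared later in this header.
 *
 *   thread_suspend (target);
 *
 *   while (thread_state (target) != THREAD_SUSPENDED)
 *     thread_delay (1, false);
 *
 *   thread_resume (target);
 */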
// Suspend execution of the calling thread.
void thread_delay (uint64_t ticks, bool absolute);

/*
 * Start running threads on the local processor.
 *
 * Interrupts must be disabled when calling this function.
 */
noreturn void thread_run_scheduler (void);

/*
 * Make the calling thread release the processor.
 *
 * This call does nothing if preemption is disabled, or the scheduler
 * determines the caller should continue to run (e.g. it's currently the only
 * runnable thread).
 *
 * Implies a full memory barrier if a context switch occurred.
 */
void thread_yield (void);

// Report a scheduling interrupt from a remote processor.
void thread_schedule_intr (void);

/*
 * Report a periodic event on the current processor.
 *
 * Interrupts and preemption must be disabled when calling this function.
 */
void thread_report_periodic_event (void);

// Set thread scheduling parameters.
void thread_setscheduler (struct thread *thread, uint8_t policy,
                          uint16_t priority);

/*
 * Variant used for priority inheritance.
 *
 * The caller must hold the turnstile thread data lock and no turnstile
 * locks when calling this function.
 */
void thread_pi_setscheduler (struct thread *thread, uint8_t policy,
                             uint16_t priority);
static inline void
thread_ref (struct thread *thread)
{
  size_t nr_refs = atomic_add_rlx (&thread->kuid.nr_refs, 1);
  assert (nr_refs != (size_t)-1);
}

static inline void
thread_unref (struct thread *thread)
{
  size_t nr_refs = atomic_sub_acq_rel (&thread->kuid.nr_refs, 1);
  assert (nr_refs);

  if (nr_refs == 1)
    thread_terminate (thread);
}
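
/*
 * Usage sketch (illustrative): keeping a thread alive across a window
 * where the caller doesn't otherwise hold a reference.
 *
 *   thread_ref (thread);
 *   ...                      // The thread may be safely accessed here.
 *   thread_unref (thread);   // May terminate the thread if this was
 *                            // the last reference.
 */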
static inline const void*
thread_wchan_addr (const struct thread *thread)
{
  return (thread->wchan_addr);
}

static inline const char*
thread_wchan_desc (const struct thread *thread)
{
  return (thread->wchan_desc);
}

// Return a character representation of the state of a thread.
char thread_state_to_chr (uint32_t state);

static inline const struct thread_sched_data*
thread_get_user_sched_data (const struct thread *thread)
{
  return (&thread->user_sched_data);
}

static inline const struct thread_sched_data*
thread_get_real_sched_data (const struct thread *thread)
{
  return (&thread->real_sched_data);
}

/*
 * If the caller requires the scheduling data to be stable, it
 * must lock one of the following objects :
 * - the containing run queue
 * - the per-thread turnstile data (turnstile_td)
 *
 * Both are locked when scheduling data are updated.
 */
static inline uint8_t
thread_user_sched_policy (const struct thread *thread)
{
  return (thread_get_user_sched_data(thread)->sched_policy);
}

static inline uint8_t
thread_user_sched_class (const struct thread *thread)
{
  return (thread_get_user_sched_data(thread)->sched_class);
}

static inline uint16_t
thread_user_priority (const struct thread *thread)
{
  return (thread_get_user_sched_data(thread)->priority);
}

static inline uint32_t
thread_user_global_priority (const struct thread *thread)
{
  return (thread_get_user_sched_data(thread)->global_priority);
}

static inline uint8_t
thread_real_sched_policy (const struct thread *thread)
{
  return (thread_get_real_sched_data(thread)->sched_policy);
}

static inline uint8_t
thread_real_sched_class (const struct thread *thread)
{
  return (thread_get_real_sched_data(thread)->sched_class);
}

static inline uint16_t
thread_real_priority (const struct thread *thread)
{
  return (thread_get_real_sched_data(thread)->priority);
}

static inline uint32_t
thread_real_global_priority (const struct thread *thread)
{
  return (thread_get_real_sched_data(thread)->global_priority);
}
// Return a string representation of the scheduling class of a thread.
const char* thread_sched_class_to_str (uint8_t sched_class);

static inline struct tcb*
thread_get_tcb (struct thread *thread)
{
  return (&thread->tcb);
}

static inline struct thread*
thread_from_tcb (struct tcb *tcb)
{
  return (structof (tcb, struct thread, tcb));
}

static inline struct thread*
thread_self (void)
{
  return (thread_from_tcb (tcb_current ()));
}

static inline int
thread_id (const struct thread *thread)
{
  return ((int)thread->kuid.id);
}

/*
 * Main scheduler invocation call.
 *
 * Called on return from interrupt or when reenabling preemption.
 */
void thread_schedule (void);
// Sleep queue lending functions.
static inline struct sleepq*
thread_sleepq_lend (struct thread *self)
{
  struct sleepq *sleepq = self->priv_sleepq;
  assert (sleepq);
  self->priv_sleepq = NULL;
  return (sleepq);
}

static inline void
thread_sleepq_return (struct sleepq *sleepq)
{
  assert (sleepq);
  assert (!thread_self()->priv_sleepq);
  thread_self()->priv_sleepq = sleepq;
}

// Turnstile lending functions.
static inline struct turnstile*
thread_turnstile_lend (void)
{
  struct turnstile *turnstile = thread_self()->priv_turnstile;
  assert (turnstile);
  thread_self()->priv_turnstile = NULL;
  return (turnstile);
}

static inline void
thread_turnstile_return (struct turnstile *turnstile)
{
  assert (turnstile);
  assert (!thread_self()->priv_turnstile);
  thread_self()->priv_turnstile = turnstile;
}

static inline struct turnstile_td*
thread_turnstile_td (struct thread *thread)
{
  return (&thread->turnstile_td);
}
// Priority propagation functions.
static inline bool
thread_priority_propagation_needed (void)
{
  return (thread_self()->propagate_priority);
}

static inline void
thread_set_priority_propagation_needed (void)
{
  thread_self()->propagate_priority = true;
}

void thread_propagate_priority (void);
/*
 * Migration control functions.
 *
 * Functions that change the migration state are implicit compiler barriers.
 */
static inline int
thread_pinned (void)
{
  return (thread_self()->pin_level != 0);
}

static inline void
thread_pin_level (uint16_t *levelp)
{
  ++*levelp;
  assert (*levelp);
  barrier ();
}

static inline void
thread_pin (void)
{
  thread_pin_level (&thread_self()->pin_level);
}

static inline void
thread_unpin_level (uint16_t *levelp)
{
  barrier ();
  assert (*levelp);
  --*levelp;
}

static inline void
thread_unpin (void)
{
  thread_unpin_level (&thread_self()->pin_level);
}

static inline void
thread_pin_guard_fini (void *ptr)
{
  thread_unpin_level (*(uint16_t **)ptr);
}

#define THREAD_PIN_GUARD()   \
  CLEANUP (thread_pin_guard_fini) uint16_t __unused *UNIQ(tpg) =   \
    ({   \
       uint16_t *p_ = &thread_self()->pin_level;   \
       thread_pin_level (p_);   \
       p_;   \
     })
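
/*
 * Usage sketch (illustrative): pinning the calling thread for the duration
 * of a lexical scope. The cleanup handler unpins automatically when the
 * scope is left, whatever the exit path.
 *
 *   void
 *   access_percpu_data (void)   // Hypothetical function.
 *   {
 *     THREAD_PIN_GUARD ();
 *     ...   // The thread may not migrate until the end of the block.
 *   }
 */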
/*
 * Preemption control functions.
 *
 * Functions that change the preemption state are implicit compiler barriers.
 */
static inline int
thread_preempt_enabled (void)
{
  return (thread_self()->preempt_level == 0);
}

static inline void
thread_preempt_disable (void)
{
  struct thread *thread = thread_self ();
  ++thread->preempt_level;
  assert (thread->preempt_level);
  barrier ();
}

static inline void
thread_preempt_enable_no_resched (void)
{
  barrier ();
  struct thread *thread = thread_self ();
  assert (thread->preempt_level);
  --thread->preempt_level;

  /*
   * Don't perform priority propagation here, because this function is
   * called on return from interrupt, where the transient state may
   * incorrectly trigger it.
   */
}

static inline void
thread_preempt_enable (void)
{
  thread_preempt_enable_no_resched ();

  if (thread_priority_propagation_needed () &&
      thread_preempt_enabled ())
    thread_propagate_priority ();

  thread_schedule ();
}

static inline void
thread_preempt_disable_intr_save (cpu_flags_t *flags)
{
  thread_preempt_disable ();
  cpu_intr_save (flags);
}

static inline void
thread_preempt_enable_intr_restore (cpu_flags_t flags)
{
  cpu_intr_restore (flags);
  thread_preempt_enable ();
}
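
/*
 * Sketch (illustrative) of a critical section protected against both
 * preemption and interrupts :
 *
 *   cpu_flags_t flags;
 *
 *   thread_preempt_disable_intr_save (&flags);
 *   ...   // Interrupt- and preemption-free critical section.
 *   thread_preempt_enable_intr_restore (flags);
 */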
/*
 * Interrupt level control functions.
 *
 * Functions that change the interrupt level are implicit compiler barriers.
 */
static inline bool
thread_interrupted (void)
{
  return (thread_self()->intr_level != 0);
}

static inline bool
thread_check_intr_context (void)
{
  return (thread_interrupted () && !cpu_intr_enabled () &&
          !thread_preempt_enabled ());
}

static inline void
thread_intr_enter_level (uint16_t *ptr)
{
  if (++*ptr == 1)
    thread_preempt_disable ();

  assert (*ptr);
  barrier ();
}

static inline void
thread_intr_enter (void)
{
  thread_intr_enter_level (&thread_self()->intr_level);
}

static inline void
thread_intr_leave_level (uint16_t *ptr)
{
  barrier ();
  assert (*ptr);

  if (--*ptr == 0)
    thread_preempt_enable_no_resched ();
}

static inline void
thread_intr_leave (void)
{
  thread_intr_leave_level (&thread_self()->intr_level);
}

static inline void
thread_intr_guard_fini (void *ptr)
{
  thread_intr_leave_level (*(uint16_t **)ptr);
}

#define THREAD_INTR_GUARD()   \
  CLEANUP (thread_intr_guard_fini) uint16_t __unused *UNIQ(tig) =   \
    ({   \
       uint16_t *p_ = &thread_self()->intr_level;   \
       thread_intr_enter_level (p_);   \
       p_;   \
     })
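
/*
 * Usage sketch (illustrative; my_intr_handler is hypothetical): maintaining
 * the interrupt level over the scope of a handler.
 *
 *   void
 *   my_intr_handler (void *arg)
 *   {
 *     THREAD_INTR_GUARD ();
 *     assert (thread_check_intr_context ());
 *     ...
 *   }
 */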
// RCU functions.
static inline struct rcu_reader*
thread_rcu_reader (struct thread *thread)
{
  return (&thread->rcu_reader);
}

static inline const char*
thread_name (const struct thread *thread)
{
  return (thread->name);
}

#ifdef CONFIG_PERFMON
static inline struct perfmon_td*
thread_get_perfmon_td (struct thread *thread)
{
  return (&thread->perfmon_td);
}
#endif

// Page fault functions.
static inline void
thread_pagefault_enable (void)
{
  --thread_self()->pagefault_level;
}

static inline void
thread_pagefault_disable (void)
{
  ++thread_self()->pagefault_level;
}
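
/*
 * Sketch (illustrative): page fault control calls nest, and each disable
 * must be balanced by an enable.
 *
 *   thread_pagefault_disable ();
 *   ...   // Code that must not take (and handle) page faults.
 *   thread_pagefault_enable ();
 */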
/*
 * Return the last CPU on which the thread has been scheduled.
 *
 * This call isn't synchronized, and the caller may obtain an outdated value.
 */
uint32_t thread_cpu (const struct thread *thread);

/*
 * Return the current state of the given thread.
 *
 * This call isn't synchronized, and the caller may obtain an outdated value.
 */
uint32_t thread_state (const struct thread *thread);

/*
 * Return true if the given thread is running.
 *
 * This call isn't synchronized, and the caller may obtain an outdated value.
 */
bool thread_is_running (const struct thread *thread);

// Get the CPU affinity mask of the specified thread.
int thread_get_affinity (const struct thread *thread, struct cpumap *cpumap);

// Set the CPU affinity mask for the specified thread.
int thread_set_affinity (struct thread *thread, const struct cpumap *cpumap);

// Look up a thread by its KUID.
static inline struct thread*
thread_by_kuid (uint32_t kuid)
{
  return (kuid_find_type (kuid, struct thread, kuid, KUID_THREAD));
}

// Handle an IPC message on a thread capability.
struct cap_iters;
struct ipc_msg_data;

ssize_t thread_handle_msg (struct thread *thread, struct cap_iters *src,
                           struct cap_iters *dst, struct ipc_msg_data *data);

/*
 * This init operation provides :
 * - a dummy thread context for the BSP, allowing the use of thread_self()
 */
INIT_OP_DECLARE (thread_setup_booter);

/*
 * This init operation provides :
 * - same as thread_setup_booter
 * - BSP run queue initialization
 */
INIT_OP_DECLARE (thread_bootstrap);

/*
 * This init operation provides :
 * - thread creation
 * - module fully initialized
 */
INIT_OP_DECLARE (thread_setup);

#endif   // KERN_THREAD_H