scheduling.h 9.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275
  1. // SPDX-License-Identifier: GPL-3.0-or-later
  2. // Copyright © 2018-2019 Ariadne Devos
  3. /* sHT -- make sure each task can make some progress */
  4. #ifndef _sHT_SCHEDULING_H
  5. #define _sHT_SCHEDULING_H
  6. #include <sHT/bugs.h>
  7. #include <sHT/compiler.h>
  8. #include <sHT/nospec.h>
  9. /* For sHT_TASK_SCHEDULED */
  10. #include <sHT/task.h>
  11. #include <sHT/test.h>
  12. #include <stddef.h>
/** A FIFO queue of tasks.
A task can only be in one queue at a time,
because of the @var{.next} pointer embedded inside.
But on the upside, no memory has to be allocated separately!
Queued tasks have the @var{sHT_TASK_QUEUED} flag set.
It is set by @var{sHT_qadd} and @var{sHT_qadd_try},
it should not be set by any other function. */
struct sHT_task_queue
{
	/** The first task, or @code{NULL} if the queue is empty.
	Tasks are taken from the front. */
	struct sHT_task *head;
	/** A pointer to @code{t->next}, where @code{t} is the last task
	in the queue, or @var{head}, if the queue is empty.
	New tasks are added to the end.
	The last task, if any, has no 'next' pointer and is reachable
	within a finite number of steps from @var{head} through 'next'. */
	struct sHT_task **tailp;
};
  32. /** Initialise a task queue, letting it be empty.
  33. @var{q} may not be accessed concurrently. */
  34. static inline void
  35. sHT_task_queue_init(struct sHT_task_queue *q)
  36. {
  37. *q = (struct sHT_task_queue) {
  38. /* Empty queue, no head */
  39. .head = NULL,
  40. .tailp = &q->head,
  41. };
  42. }
  43. /** Add a task to the back of the queue,
  44. @var{q}: any valid queue
  45. @var{task}: a non-queued task
  46. It may not already be in the queue.
  47. Modifies @code{q->tailp}, @code{*q->tailp}
  48. and accesses the flag @var{sHT_TASK_QUEUED} in @var{task}.
  49. These may not be accessed concurrently. */
  50. __attribute__((nonnull (1, 2)))
  51. static inline void
  52. sHT_qadd(struct sHT_task_queue *q, struct sHT_task *task)
  53. {
  54. sHT_require(!(task->flags & sHT_TASK_QUEUED));
  55. task->flags |= sHT_TASK_QUEUED;
  56. /* Invariants are temporarily violated */
  57. *q->tailp = task;
  58. q->tailp = &task->next;
  59. }
  60. /** Add a task to the back of the queue, if it isn't inside
  61. @var{q}: any valid queue
  62. @var{task}: a task. If queued, it must belong to @var{q}.
  63. Modifies @code{q->tailp}, @code{*q->tailp}
  64. and accesses the flag @var{sHT_TASK_QUEUED} in @var{task}.
  65. These may not be accessed concurrently. */
  66. __attribute__((nonnull (1, 2)))
  67. static inline void
  68. sHT_qadd_try(struct sHT_task_queue *q, struct sHT_task *task)
  69. {
  70. if (sHT_and_any(task->flags, sHT_TASK_QUEUED))
  71. return;
  72. task->flags |= sHT_TASK_QUEUED;
  73. /* Invariants are temporarily violated */
  74. *q->tailp = task;
  75. q->tailp = &task->next;
  76. }
  77. /** Try to take a single object from the front of the queue
  78. @var{q}: any valid queue
  79. @var{task}: an @var{sHT_task} variable
  80. @var{nil}: code to perform if the queue is empty
  81. @var{some}: code to perform if the first task is taken
  82. @var{nil} and @var{some} first argument is @var{q}. They are
  83. called in tail position and their behaviour is not part of this
  84. specification.
  85. The queue and @var{.next} pointers may not be accessed concurrently. */
  86. __attribute__((always_inline))
  87. static inline void
  88. sHT_qtake(struct sHT_task_queue *q, void (* nil)(struct sHT_task_queue *), void (* some)(struct sHT_task_queue *, struct sHT_task *))
  89. {
  90. struct sHT_task *first = q->head;
  91. if (sHT_null_p(first)) {
  92. nil(q);
  93. return;
  94. }
  95. struct sHT_task *next = q->head = first->next;
  96. if (sHT_null_p(next))
  97. /* @code{q->tailp} points to @code{first->next},
  98. but that task is about to be removed, leaving
  99. an empty queue -- reset the tail pointer. */
  100. q->tailp = &q->head;
  101. some(q, first);
  102. }
#if 0
/* NOTE(review): this block is compiled out.  It refers to
   @code{struct sHT_task_deque}, which is not declared in this header --
   declare it (or include its header) before re-enabling. */
/** Convenience typedef for @var{sHT_perform_all} */
typedef void (sHT_task_perform)(void *cookie, struct sHT_task *);
/** Perform all tasks that have been queued in @var{deck}.
@var{deck} must be non-free and concurrent scheduling and descheduling is
forbidden.
Let all tasks belonging to @var{deck}, even invalid, unscheduled and removed
tasks, be U, which may contain duplicates. Let all scheduled tasks in
@var{deck} be S. (S is a subset of U, and both are multisets.)
Compose P, a -- possibly infinite -- list of tasks, such that every element
of P is in U (but not necessarily having the same multiplicity) and on a
non-speculative execution, there is a bijection between P and S.
(Informally speaking, P are all tasks of that will actually be executed.
Ignoring Spectre, it corresponds to all tasks that have been scheduled.
However, as the Spectre is real, it is possible that, temporarily, tasks will
be executed multiple times and old tasks are 'recycled'.)
Apply @var{f} to each element of P (the first argument is @var{cookie}).
@var{f} is limited in operations to @var{deck}. It may be abandoned.
Let abandonement be:
- a cancellation within this function call, unhandled by @var{f}
- a decision of @var{f} to abandon @var{deck}
- a non-local return by @var{f}, skipping this function call
Abandonement implies @var{deck} may be freed or have its capacity read.
Non-abandonement implies @var{deck} may have its capacity read and that
tasks may be scheduled. */
__attribute__((nonnull (2, 3)))
__attribute__((always_inline))
static inline void
sHT_perform_all(void *cookie, struct sHT_task_deque *deck, sHT_task_perform *f)
{
	/* @var{f} may schedule extra tasks, so remember the number of tasks
	to perform. */
	size_t n_todo = deck->n;
	for (size_t i = 0; sHT_gt(n_todo, i); i++) {
		size_t j = deck->head + i;
		/* Make sure it points to a task belonging to @var{deck},
		to take control of Spectre's branch. */
		j %= deck->capacity;
		struct sHT_task *task = deck->tasks[j];
		task->flags &= ~sHT_TASK_SCHEDULED;
		f(cookie, task);
	}
	/* Consume exactly the tasks that were pending on entry; anything
	   @var{f} scheduled meanwhile stays in the deque. */
	deck->head = (deck->head + n_todo) % deck->capacity;
	deck->n -= n_todo;
}
#endif
/** A non-negative identifier representing a set of kernel resources to watch
for IO readiness. On *BSD, this is a kqueue(2), on Linux, this is an epoll(7)
instance. */
typedef int sHT_watch_set;
/** Why allocating a watch set failed.
The constant names mirror the corresponding errno(3) names. */
enum sHT_watch_set_alloc_error
{
	/** The per-process limit on watch sets would have been exceeded. */
	sHT_WATCH_SET_ALLOC_EMFILE = -1,
	/** The system-wide limit on watch sets would have been exceeded. */
	sHT_WATCH_SET_ALLOC_ENFILE = -2,
	/** Memory limits would have been exceeded. */
	sHT_WATCH_SET_ALLOC_ENOMEM = -3,
};
/** Try to allocate a watch set.
There may be speculative file descriptor leaks. This function may be
performed concurrently. If a call to this function is interrupted by
POSIX asynchronous cancellation, some file descriptors may be leaked.
If the return value is despeculated and it is non-negative, it is the
identifier of the watch set as a @var{sHT_watch_set}.
If the return value is despeculated and it is negative, it is a negative
@var{sHT_watch_set_alloc_error} error code. */
int
sHT_alloc_watch_set(void);
/** Free the watch set @var{watches}.
@var{watches}: the possibly non-empty watch set to free.
@var{watches} may not be accessed concurrently. If a call to this function
is interrupted by POSIX asynchronous cancellation, @var{watches} may be
leaked.
There may be speculative file descriptor leaks.
For the avoidance of doubt: @var{watches} must be valid. As such, it must
have been despeculated at some point in time. Double-freeing is not allowed.
This cannot fail in any way. */
void
sHT_free_watch_set(sHT_watch_set watches);
/** Result of trying to install a watch into a watch set. */
enum sHT_watch_set_install
{
	/** The file descriptor has been put into the watch set. */
	sHT_WATCH_SET_INSTALL_DONE = 0,
	/** The watch could not be allocated due to memory limits. */
	sHT_WATCH_SET_INSTALL_ENOMEM = 1,
	/** A maximum on the number of watches would have been exceeded
	if the watch were allocated. */
	sHT_WATCH_SET_INSTALL_LIMIT = 2,
};
/** Try to asynchronously wait for any readiness change on a file descriptor.
@var{watches}: the watch set to put @var{fd} in.
@var{fd}: the pollable file descriptor to register (e.g. a watch set or
socket). Linux does not allow polling on regular files.
@var{task}: the task to continue later, after a change in readiness.
@var{watches} may not be accessed concurrently and may be modified.
If a call to this function is cancelled, freeing becomes the only operation
allowed upon @var{watches}.
@var{fd} may not already be in @var{watches} and creating loops is not
allowed.
The return value may speculatively be incorrect. There may be speculative
memory leaks.
If the return value is despeculated, it is a valid
@var{sHT_watch_set_install} indicating the result. */
__attribute__((warn_unused_result))
__attribute__((nonnull (3)))
enum sHT_watch_set_install
sHT_install_edge_trigger(sHT_watch_set watches, int fd, struct sHT_task *task);
/** Stop asynchronous waiting for a readiness change on a file descriptor.
@var{watches}: the watch set to remove @var{fd} from.
@var{fd}: the file descriptor to remove
@var{watches} may not be accessed concurrently and may be modified.
If a call to this function is cancelled, freeing becomes the only operation
allowed upon @var{watches}.
@var{fd} must be in @var{watches}.
There may be speculative memory leaks and the removal may speculatively be
ignored. This cannot fail in any way. */
void
sHT_delete_edge_trigger(sHT_watch_set watches, int fd);
  222. #endif