/*	$NetBSD: queue.h,v 1.70 2015/11/02 15:21:23 christos Exp $	*/

/*
 * Copyright (c) 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)queue.h	8.5 (Berkeley) 8/20/94
 */
  32. #ifndef _SYS_QUEUE_H_
  33. #define _SYS_QUEUE_H_
  34. /*
  35. * This file defines five types of data structures: singly-linked lists,
  36. * lists, simple queues, tail queues, and circular queues.
  37. *
  38. * A singly-linked list is headed by a single forward pointer. The
  39. * elements are singly linked for minimum space and pointer manipulation
  40. * overhead at the expense of O(n) removal for arbitrary elements. New
  41. * elements can be added to the list after an existing element or at the
  42. * head of the list. Elements being removed from the head of the list
  43. * should use the explicit macro for this purpose for optimum
  44. * efficiency. A singly-linked list may only be traversed in the forward
  45. * direction. Singly-linked lists are ideal for applications with large
  46. * datasets and few or no removals or for implementing a LIFO queue.
  47. *
  48. * A list is headed by a single forward pointer (or an array of forward
  49. * pointers for a hash table header). The elements are doubly linked
  50. * so that an arbitrary element can be removed without a need to
  51. * traverse the list. New elements can be added to the list before
  52. * or after an existing element or at the head of the list. A list
  53. * may only be traversed in the forward direction.
  54. *
  55. * A simple queue is headed by a pair of pointers, one the head of the
  56. * list and the other to the tail of the list. The elements are singly
  57. * linked to save space, so elements can only be removed from the
  58. * head of the list. New elements can be added to the list after
  59. * an existing element, at the head of the list, or at the end of the
  60. * list. A simple queue may only be traversed in the forward direction.
  61. *
  62. * A tail queue is headed by a pair of pointers, one to the head of the
  63. * list and the other to the tail of the list. The elements are doubly
  64. * linked so that an arbitrary element can be removed without a need to
  65. * traverse the list. New elements can be added to the list before or
  66. * after an existing element, at the head of the list, or at the end of
  67. * the list. A tail queue may be traversed in either direction.
  68. *
  69. * A circle queue is headed by a pair of pointers, one to the head of the
  70. * list and the other to the tail of the list. The elements are doubly
  71. * linked so that an arbitrary element can be removed without a need to
  72. * traverse the list. New elements can be added to the list before or after
  73. * an existing element, at the head of the list, or at the end of the list.
  74. * A circle queue may be traversed in either direction, but has a more
  75. * complex end of list detection.
  76. *
  77. * For details on the use of these macros, see the queue(3) manual page.
  78. */
  79. /*
  80. * Include the definition of NULL only on NetBSD because sys/null.h
  81. * is not available elsewhere. This conditional makes the header
  82. * portable and it can simply be dropped verbatim into any system.
  83. * The caveat is that on other systems some other header
  84. * must provide NULL before the macros can be used.
  85. */
  86. #ifdef __NetBSD__
  87. #include <sys/null.h>
  88. #endif
  89. #if defined(QUEUEDEBUG)
  90. # if defined(_KERNEL)
  91. # define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
  92. # else
  93. # include <err.h>
  94. # define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
  95. # endif
  96. #endif
  97. /*
  98. * Singly-linked List definitions.
  99. */
  100. #define SLIST_HEAD(name, type) \
  101. struct name { \
  102. struct type *slh_first; /* first element */ \
  103. }
  104. #define SLIST_HEAD_INITIALIZER(head) \
  105. { NULL }
  106. #define SLIST_ENTRY(type) \
  107. struct { \
  108. struct type *sle_next; /* next element */ \
  109. }
  110. /*
  111. * Singly-linked List access methods.
  112. */
  113. #define SLIST_FIRST(head) ((head)->slh_first)
  114. #define SLIST_END(head) NULL
  115. #define SLIST_EMPTY(head) ((head)->slh_first == NULL)
  116. #define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
  117. #define SLIST_FOREACH(var, head, field) \
  118. for((var) = (head)->slh_first; \
  119. (var) != SLIST_END(head); \
  120. (var) = (var)->field.sle_next)
  121. #define SLIST_FOREACH_SAFE(var, head, field, tvar) \
  122. for ((var) = SLIST_FIRST((head)); \
  123. (var) != SLIST_END(head) && \
  124. ((tvar) = SLIST_NEXT((var), field), 1); \
  125. (var) = (tvar))
  126. /*
  127. * Singly-linked List functions.
  128. */
  129. #define SLIST_INIT(head) do { \
  130. (head)->slh_first = SLIST_END(head); \
  131. } while (/*CONSTCOND*/0)
  132. #define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
  133. (elm)->field.sle_next = (slistelm)->field.sle_next; \
  134. (slistelm)->field.sle_next = (elm); \
  135. } while (/*CONSTCOND*/0)
  136. #define SLIST_INSERT_HEAD(head, elm, field) do { \
  137. (elm)->field.sle_next = (head)->slh_first; \
  138. (head)->slh_first = (elm); \
  139. } while (/*CONSTCOND*/0)
  140. #define SLIST_REMOVE_AFTER(slistelm, field) do { \
  141. (slistelm)->field.sle_next = \
  142. SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
  143. } while (/*CONSTCOND*/0)
  144. #define SLIST_REMOVE_HEAD(head, field) do { \
  145. (head)->slh_first = (head)->slh_first->field.sle_next; \
  146. } while (/*CONSTCOND*/0)
  147. #define SLIST_REMOVE(head, elm, type, field) do { \
  148. if ((head)->slh_first == (elm)) { \
  149. SLIST_REMOVE_HEAD((head), field); \
  150. } \
  151. else { \
  152. struct type *curelm = (head)->slh_first; \
  153. while(curelm->field.sle_next != (elm)) \
  154. curelm = curelm->field.sle_next; \
  155. curelm->field.sle_next = \
  156. curelm->field.sle_next->field.sle_next; \
  157. } \
  158. } while (/*CONSTCOND*/0)
  159. /*
  160. * List definitions.
  161. */
  162. #define LIST_HEAD(name, type) \
  163. struct name { \
  164. struct type *lh_first; /* first element */ \
  165. }
  166. #define LIST_HEAD_INITIALIZER(head) \
  167. { NULL }
  168. #define LIST_ENTRY(type) \
  169. struct { \
  170. struct type *le_next; /* next element */ \
  171. struct type **le_prev; /* address of previous next element */ \
  172. }
  173. /*
  174. * List access methods.
  175. */
  176. #define LIST_FIRST(head) ((head)->lh_first)
  177. #define LIST_END(head) NULL
  178. #define LIST_EMPTY(head) ((head)->lh_first == LIST_END(head))
  179. #define LIST_NEXT(elm, field) ((elm)->field.le_next)
  180. #define LIST_FOREACH(var, head, field) \
  181. for ((var) = ((head)->lh_first); \
  182. (var) != LIST_END(head); \
  183. (var) = ((var)->field.le_next))
  184. #define LIST_FOREACH_SAFE(var, head, field, tvar) \
  185. for ((var) = LIST_FIRST((head)); \
  186. (var) != LIST_END(head) && \
  187. ((tvar) = LIST_NEXT((var), field), 1); \
  188. (var) = (tvar))
  189. #define LIST_MOVE(head1, head2) do { \
  190. LIST_INIT((head2)); \
  191. if (!LIST_EMPTY((head1))) { \
  192. (head2)->lh_first = (head1)->lh_first; \
  193. LIST_INIT((head1)); \
  194. } \
  195. } while (/*CONSTCOND*/0)
  196. /*
  197. * List functions.
  198. */
  199. #if defined(QUEUEDEBUG)
  200. #define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
  201. if ((head)->lh_first && \
  202. (head)->lh_first->field.le_prev != &(head)->lh_first) \
  203. QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head), \
  204. __FILE__, __LINE__);
  205. #define QUEUEDEBUG_LIST_OP(elm, field) \
  206. if ((elm)->field.le_next && \
  207. (elm)->field.le_next->field.le_prev != \
  208. &(elm)->field.le_next) \
  209. QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm), \
  210. __FILE__, __LINE__); \
  211. if (*(elm)->field.le_prev != (elm)) \
  212. QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm), \
  213. __FILE__, __LINE__);
  214. #define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
  215. (elm)->field.le_next = (void *)1L; \
  216. (elm)->field.le_prev = (void *)1L;
  217. #else
  218. #define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
  219. #define QUEUEDEBUG_LIST_OP(elm, field)
  220. #define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
  221. #endif
  222. #define LIST_INIT(head) do { \
  223. (head)->lh_first = LIST_END(head); \
  224. } while (/*CONSTCOND*/0)
  225. #define LIST_INSERT_AFTER(listelm, elm, field) do { \
  226. QUEUEDEBUG_LIST_OP((listelm), field) \
  227. if (((elm)->field.le_next = (listelm)->field.le_next) != \
  228. LIST_END(head)) \
  229. (listelm)->field.le_next->field.le_prev = \
  230. &(elm)->field.le_next; \
  231. (listelm)->field.le_next = (elm); \
  232. (elm)->field.le_prev = &(listelm)->field.le_next; \
  233. } while (/*CONSTCOND*/0)
  234. #define LIST_INSERT_BEFORE(listelm, elm, field) do { \
  235. QUEUEDEBUG_LIST_OP((listelm), field) \
  236. (elm)->field.le_prev = (listelm)->field.le_prev; \
  237. (elm)->field.le_next = (listelm); \
  238. *(listelm)->field.le_prev = (elm); \
  239. (listelm)->field.le_prev = &(elm)->field.le_next; \
  240. } while (/*CONSTCOND*/0)
  241. #define LIST_INSERT_HEAD(head, elm, field) do { \
  242. QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
  243. if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
  244. (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
  245. (head)->lh_first = (elm); \
  246. (elm)->field.le_prev = &(head)->lh_first; \
  247. } while (/*CONSTCOND*/0)
  248. #define LIST_REMOVE(elm, field) do { \
  249. QUEUEDEBUG_LIST_OP((elm), field) \
  250. if ((elm)->field.le_next != NULL) \
  251. (elm)->field.le_next->field.le_prev = \
  252. (elm)->field.le_prev; \
  253. *(elm)->field.le_prev = (elm)->field.le_next; \
  254. QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
  255. } while (/*CONSTCOND*/0)
  256. #define LIST_REPLACE(elm, elm2, field) do { \
  257. if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
  258. (elm2)->field.le_next->field.le_prev = \
  259. &(elm2)->field.le_next; \
  260. (elm2)->field.le_prev = (elm)->field.le_prev; \
  261. *(elm2)->field.le_prev = (elm2); \
  262. QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
  263. } while (/*CONSTCOND*/0)
  264. /*
  265. * Simple queue definitions.
  266. */
  267. #define SIMPLEQ_HEAD(name, type) \
  268. struct name { \
  269. struct type *sqh_first; /* first element */ \
  270. struct type **sqh_last; /* addr of last next element */ \
  271. }
  272. #define SIMPLEQ_HEAD_INITIALIZER(head) \
  273. { NULL, &(head).sqh_first }
  274. #define SIMPLEQ_ENTRY(type) \
  275. struct { \
  276. struct type *sqe_next; /* next element */ \
  277. }
  278. /*
  279. * Simple queue access methods.
  280. */
  281. #define SIMPLEQ_FIRST(head) ((head)->sqh_first)
  282. #define SIMPLEQ_END(head) NULL
  283. #define SIMPLEQ_EMPTY(head) ((head)->sqh_first == SIMPLEQ_END(head))
  284. #define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
  285. #define SIMPLEQ_FOREACH(var, head, field) \
  286. for ((var) = ((head)->sqh_first); \
  287. (var) != SIMPLEQ_END(head); \
  288. (var) = ((var)->field.sqe_next))
  289. #define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \
  290. for ((var) = ((head)->sqh_first); \
  291. (var) != SIMPLEQ_END(head) && \
  292. ((next = ((var)->field.sqe_next)), 1); \
  293. (var) = (next))
  294. /*
  295. * Simple queue functions.
  296. */
  297. #define SIMPLEQ_INIT(head) do { \
  298. (head)->sqh_first = NULL; \
  299. (head)->sqh_last = &(head)->sqh_first; \
  300. } while (/*CONSTCOND*/0)
  301. #define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
  302. if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
  303. (head)->sqh_last = &(elm)->field.sqe_next; \
  304. (head)->sqh_first = (elm); \
  305. } while (/*CONSTCOND*/0)
  306. #define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
  307. (elm)->field.sqe_next = NULL; \
  308. *(head)->sqh_last = (elm); \
  309. (head)->sqh_last = &(elm)->field.sqe_next; \
  310. } while (/*CONSTCOND*/0)
  311. #define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
  312. if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
  313. (head)->sqh_last = &(elm)->field.sqe_next; \
  314. (listelm)->field.sqe_next = (elm); \
  315. } while (/*CONSTCOND*/0)
  316. #define SIMPLEQ_REMOVE_HEAD(head, field) do { \
  317. if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
  318. (head)->sqh_last = &(head)->sqh_first; \
  319. } while (/*CONSTCOND*/0)
  320. #define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
  321. if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
  322. == NULL) \
  323. (head)->sqh_last = &(elm)->field.sqe_next; \
  324. } while (/*CONSTCOND*/0)
  325. #define SIMPLEQ_REMOVE(head, elm, type, field) do { \
  326. if ((head)->sqh_first == (elm)) { \
  327. SIMPLEQ_REMOVE_HEAD((head), field); \
  328. } else { \
  329. struct type *curelm = (head)->sqh_first; \
  330. while (curelm->field.sqe_next != (elm)) \
  331. curelm = curelm->field.sqe_next; \
  332. if ((curelm->field.sqe_next = \
  333. curelm->field.sqe_next->field.sqe_next) == NULL) \
  334. (head)->sqh_last = &(curelm)->field.sqe_next; \
  335. } \
  336. } while (/*CONSTCOND*/0)
  337. #define SIMPLEQ_CONCAT(head1, head2) do { \
  338. if (!SIMPLEQ_EMPTY((head2))) { \
  339. *(head1)->sqh_last = (head2)->sqh_first; \
  340. (head1)->sqh_last = (head2)->sqh_last; \
  341. SIMPLEQ_INIT((head2)); \
  342. } \
  343. } while (/*CONSTCOND*/0)
  344. #define SIMPLEQ_LAST(head, type, field) \
  345. (SIMPLEQ_EMPTY((head)) ? \
  346. NULL : \
  347. ((struct type *)(void *) \
  348. ((char *)((head)->sqh_last) - offsetof(struct type, field))))
  349. /*
  350. * Tail queue definitions.
  351. */
  352. #define _TAILQ_HEAD(name, type, qual) \
  353. struct name { \
  354. qual type *tqh_first; /* first element */ \
  355. qual type *qual *tqh_last; /* addr of last next element */ \
  356. }
  357. #define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,)
  358. #define TAILQ_HEAD_INITIALIZER(head) \
  359. { TAILQ_END(head), &(head).tqh_first }
  360. #define _TAILQ_ENTRY(type, qual) \
  361. struct { \
  362. qual type *tqe_next; /* next element */ \
  363. qual type *qual *tqe_prev; /* address of previous next element */\
  364. }
  365. #define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,)
  366. /*
  367. * Tail queue access methods.
  368. */
  369. #define TAILQ_FIRST(head) ((head)->tqh_first)
  370. #define TAILQ_END(head) (NULL)
  371. #define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
  372. #define TAILQ_LAST(head, headname) \
  373. (*(((struct headname *)(void *)((head)->tqh_last))->tqh_last))
  374. #define TAILQ_PREV(elm, headname, field) \
  375. (*(((struct headname *)(void *)((elm)->field.tqe_prev))->tqh_last))
  376. #define TAILQ_EMPTY(head) (TAILQ_FIRST(head) == TAILQ_END(head))
  377. #define TAILQ_FOREACH(var, head, field) \
  378. for ((var) = ((head)->tqh_first); \
  379. (var) != TAILQ_END(head); \
  380. (var) = ((var)->field.tqe_next))
  381. #define TAILQ_FOREACH_SAFE(var, head, field, next) \
  382. for ((var) = ((head)->tqh_first); \
  383. (var) != TAILQ_END(head) && \
  384. ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
  385. #define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
  386. for ((var) = TAILQ_LAST((head), headname); \
  387. (var) != TAILQ_END(head); \
  388. (var) = TAILQ_PREV((var), headname, field))
  389. #define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
  390. for ((var) = TAILQ_LAST((head), headname); \
  391. (var) != TAILQ_END(head) && \
  392. ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
  393. /*
  394. * Tail queue functions.
  395. */
  396. #if defined(QUEUEDEBUG)
  397. #define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
  398. if ((head)->tqh_first && \
  399. (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
  400. QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head), \
  401. __FILE__, __LINE__);
  402. #define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
  403. if (*(head)->tqh_last != NULL) \
  404. QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head), \
  405. __FILE__, __LINE__);
  406. #define QUEUEDEBUG_TAILQ_OP(elm, field) \
  407. if ((elm)->field.tqe_next && \
  408. (elm)->field.tqe_next->field.tqe_prev != \
  409. &(elm)->field.tqe_next) \
  410. QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm), \
  411. __FILE__, __LINE__); \
  412. if (*(elm)->field.tqe_prev != (elm)) \
  413. QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm), \
  414. __FILE__, __LINE__);
  415. #define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
  416. if ((elm)->field.tqe_next == NULL && \
  417. (head)->tqh_last != &(elm)->field.tqe_next) \
  418. QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\
  419. (head), (elm), __FILE__, __LINE__);
  420. #define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
  421. (elm)->field.tqe_next = (void *)1L; \
  422. (elm)->field.tqe_prev = (void *)1L;
  423. #else
  424. #define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
  425. #define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
  426. #define QUEUEDEBUG_TAILQ_OP(elm, field)
  427. #define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
  428. #define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
  429. #endif
  430. #define TAILQ_INIT(head) do { \
  431. (head)->tqh_first = TAILQ_END(head); \
  432. (head)->tqh_last = &(head)->tqh_first; \
  433. } while (/*CONSTCOND*/0)
  434. #define TAILQ_INSERT_HEAD(head, elm, field) do { \
  435. QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \
  436. if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\
  437. (head)->tqh_first->field.tqe_prev = \
  438. &(elm)->field.tqe_next; \
  439. else \
  440. (head)->tqh_last = &(elm)->field.tqe_next; \
  441. (head)->tqh_first = (elm); \
  442. (elm)->field.tqe_prev = &(head)->tqh_first; \
  443. } while (/*CONSTCOND*/0)
  444. #define TAILQ_INSERT_TAIL(head, elm, field) do { \
  445. QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \
  446. (elm)->field.tqe_next = TAILQ_END(head); \
  447. (elm)->field.tqe_prev = (head)->tqh_last; \
  448. *(head)->tqh_last = (elm); \
  449. (head)->tqh_last = &(elm)->field.tqe_next; \
  450. } while (/*CONSTCOND*/0)
  451. #define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
  452. QUEUEDEBUG_TAILQ_OP((listelm), field) \
  453. if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != \
  454. TAILQ_END(head)) \
  455. (elm)->field.tqe_next->field.tqe_prev = \
  456. &(elm)->field.tqe_next; \
  457. else \
  458. (head)->tqh_last = &(elm)->field.tqe_next; \
  459. (listelm)->field.tqe_next = (elm); \
  460. (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
  461. } while (/*CONSTCOND*/0)
  462. #define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
  463. QUEUEDEBUG_TAILQ_OP((listelm), field) \
  464. (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
  465. (elm)->field.tqe_next = (listelm); \
  466. *(listelm)->field.tqe_prev = (elm); \
  467. (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
  468. } while (/*CONSTCOND*/0)
  469. #define TAILQ_REMOVE(head, elm, field) do { \
  470. QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \
  471. QUEUEDEBUG_TAILQ_OP((elm), field) \
  472. if (((elm)->field.tqe_next) != TAILQ_END(head)) \
  473. (elm)->field.tqe_next->field.tqe_prev = \
  474. (elm)->field.tqe_prev; \
  475. else \
  476. (head)->tqh_last = (elm)->field.tqe_prev; \
  477. *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
  478. QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
  479. } while (/*CONSTCOND*/0)
  480. #define TAILQ_REPLACE(head, elm, elm2, field) do { \
  481. if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != \
  482. TAILQ_END(head)) \
  483. (elm2)->field.tqe_next->field.tqe_prev = \
  484. &(elm2)->field.tqe_next; \
  485. else \
  486. (head)->tqh_last = &(elm2)->field.tqe_next; \
  487. (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
  488. *(elm2)->field.tqe_prev = (elm2); \
  489. QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
  490. } while (/*CONSTCOND*/0)
  491. #define TAILQ_CONCAT(head1, head2, field) do { \
  492. if (!TAILQ_EMPTY(head2)) { \
  493. *(head1)->tqh_last = (head2)->tqh_first; \
  494. (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
  495. (head1)->tqh_last = (head2)->tqh_last; \
  496. TAILQ_INIT((head2)); \
  497. } \
  498. } while (/*CONSTCOND*/0)
  499. /*
  500. * Singly-linked Tail queue declarations.
  501. */
  502. #define STAILQ_HEAD(name, type) \
  503. struct name { \
  504. struct type *stqh_first; /* first element */ \
  505. struct type **stqh_last; /* addr of last next element */ \
  506. }
  507. #define STAILQ_HEAD_INITIALIZER(head) \
  508. { NULL, &(head).stqh_first }
  509. #define STAILQ_ENTRY(type) \
  510. struct { \
  511. struct type *stqe_next; /* next element */ \
  512. }
  513. /*
  514. * Singly-linked Tail queue access methods.
  515. */
  516. #define STAILQ_FIRST(head) ((head)->stqh_first)
  517. #define STAILQ_END(head) NULL
  518. #define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
  519. #define STAILQ_EMPTY(head) (STAILQ_FIRST(head) == STAILQ_END(head))
  520. /*
  521. * Singly-linked Tail queue functions.
  522. */
  523. #define STAILQ_INIT(head) do { \
  524. (head)->stqh_first = NULL; \
  525. (head)->stqh_last = &(head)->stqh_first; \
  526. } while (/*CONSTCOND*/0)
  527. #define STAILQ_INSERT_HEAD(head, elm, field) do { \
  528. if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
  529. (head)->stqh_last = &(elm)->field.stqe_next; \
  530. (head)->stqh_first = (elm); \
  531. } while (/*CONSTCOND*/0)
  532. #define STAILQ_INSERT_TAIL(head, elm, field) do { \
  533. (elm)->field.stqe_next = NULL; \
  534. *(head)->stqh_last = (elm); \
  535. (head)->stqh_last = &(elm)->field.stqe_next; \
  536. } while (/*CONSTCOND*/0)
  537. #define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
  538. if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
  539. (head)->stqh_last = &(elm)->field.stqe_next; \
  540. (listelm)->field.stqe_next = (elm); \
  541. } while (/*CONSTCOND*/0)
  542. #define STAILQ_REMOVE_HEAD(head, field) do { \
  543. if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
  544. (head)->stqh_last = &(head)->stqh_first; \
  545. } while (/*CONSTCOND*/0)
  546. #define STAILQ_REMOVE(head, elm, type, field) do { \
  547. if ((head)->stqh_first == (elm)) { \
  548. STAILQ_REMOVE_HEAD((head), field); \
  549. } else { \
  550. struct type *curelm = (head)->stqh_first; \
  551. while (curelm->field.stqe_next != (elm)) \
  552. curelm = curelm->field.stqe_next; \
  553. if ((curelm->field.stqe_next = \
  554. curelm->field.stqe_next->field.stqe_next) == NULL) \
  555. (head)->stqh_last = &(curelm)->field.stqe_next; \
  556. } \
  557. } while (/*CONSTCOND*/0)
  558. #define STAILQ_FOREACH(var, head, field) \
  559. for ((var) = ((head)->stqh_first); \
  560. (var); \
  561. (var) = ((var)->field.stqe_next))
  562. #define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
  563. for ((var) = STAILQ_FIRST((head)); \
  564. (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
  565. (var) = (tvar))
  566. #define STAILQ_CONCAT(head1, head2) do { \
  567. if (!STAILQ_EMPTY((head2))) { \
  568. *(head1)->stqh_last = (head2)->stqh_first; \
  569. (head1)->stqh_last = (head2)->stqh_last; \
  570. STAILQ_INIT((head2)); \
  571. } \
  572. } while (/*CONSTCOND*/0)
  573. #define STAILQ_LAST(head, type, field) \
  574. (STAILQ_EMPTY((head)) ? \
  575. NULL : \
  576. ((struct type *)(void *) \
  577. ((char *)((head)->stqh_last) - offsetof(struct type, field))))
  578. #ifndef _KERNEL
  579. /*
  580. * Circular queue definitions. Do not use. We still keep the macros
  581. * for compatibility but because of pointer aliasing issues their use
  582. * is discouraged!
  583. */
  584. /*
  585. * __launder_type(): We use this ugly hack to work around the the compiler
  586. * noticing that two types may not alias each other and elide tests in code.
  587. * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
  588. * 'struct type *' (see CIRCLEQ_HEAD()). Modern compilers (such as GCC
  589. * 4.8) declare these comparisons as always false, causing the code to
  590. * not run as designed.
  591. *
  592. * This hack is only to be used for comparisons and thus can be fully const.
  593. * Do not use for assignment.
  594. *
  595. * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
  596. * this by changing the head/tail sentinal values, but see the note above
  597. * this one.
  598. */
  599. static __inline const void * __launder_type(const void *);
  600. static __inline const void *
  601. __launder_type(const void *__x)
  602. {
  603. __asm __volatile("" : "+r" (__x));
  604. return __x;
  605. }
  606. #if defined(QUEUEDEBUG)
  607. #define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
  608. if ((head)->cqh_first != CIRCLEQ_ENDC(head) && \
  609. (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head)) \
  610. QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head), \
  611. __FILE__, __LINE__); \
  612. if ((head)->cqh_last != CIRCLEQ_ENDC(head) && \
  613. (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head)) \
  614. QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head), \
  615. __FILE__, __LINE__);
  616. #define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
  617. if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) { \
  618. if ((head)->cqh_last != (elm)) \
  619. QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d", \
  620. (elm), __FILE__, __LINE__); \
  621. } else { \
  622. if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
  623. QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d", \
  624. (elm), __FILE__, __LINE__); \
  625. } \
  626. if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) { \
  627. if ((head)->cqh_first != (elm)) \
  628. QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d", \
  629. (elm), __FILE__, __LINE__); \
  630. } else { \
  631. if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
  632. QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d", \
  633. (elm), __FILE__, __LINE__); \
  634. }
  635. #define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
  636. (elm)->field.cqe_next = (void *)1L; \
  637. (elm)->field.cqe_prev = (void *)1L;
  638. #else
  639. #define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
  640. #define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
  641. #define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
  642. #endif
  643. #define CIRCLEQ_HEAD(name, type) \
  644. struct name { \
  645. struct type *cqh_first; /* first element */ \
  646. struct type *cqh_last; /* last element */ \
  647. }
  648. #define CIRCLEQ_HEAD_INITIALIZER(head) \
  649. { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
  650. #define CIRCLEQ_ENTRY(type) \
  651. struct { \
  652. struct type *cqe_next; /* next element */ \
  653. struct type *cqe_prev; /* previous element */ \
  654. }
  655. /*
  656. * Circular queue functions.
  657. */
  658. #define CIRCLEQ_INIT(head) do { \
  659. (head)->cqh_first = CIRCLEQ_END(head); \
  660. (head)->cqh_last = CIRCLEQ_END(head); \
  661. } while (/*CONSTCOND*/0)
  662. #define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
  663. QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
  664. QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
  665. (elm)->field.cqe_next = (listelm)->field.cqe_next; \
  666. (elm)->field.cqe_prev = (listelm); \
  667. if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
  668. (head)->cqh_last = (elm); \
  669. else \
  670. (listelm)->field.cqe_next->field.cqe_prev = (elm); \
  671. (listelm)->field.cqe_next = (elm); \
  672. } while (/*CONSTCOND*/0)
  673. #define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
  674. QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
  675. QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
  676. (elm)->field.cqe_next = (listelm); \
  677. (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
  678. if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
  679. (head)->cqh_first = (elm); \
  680. else \
  681. (listelm)->field.cqe_prev->field.cqe_next = (elm); \
  682. (listelm)->field.cqe_prev = (elm); \
  683. } while (/*CONSTCOND*/0)
  684. #define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
  685. QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
  686. (elm)->field.cqe_next = (head)->cqh_first; \
  687. (elm)->field.cqe_prev = CIRCLEQ_END(head); \
  688. if ((head)->cqh_last == CIRCLEQ_ENDC(head)) \
  689. (head)->cqh_last = (elm); \
  690. else \
  691. (head)->cqh_first->field.cqe_prev = (elm); \
  692. (head)->cqh_first = (elm); \
  693. } while (/*CONSTCOND*/0)
  694. #define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
  695. QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
  696. (elm)->field.cqe_next = CIRCLEQ_END(head); \
  697. (elm)->field.cqe_prev = (head)->cqh_last; \
  698. if ((head)->cqh_first == CIRCLEQ_ENDC(head)) \
  699. (head)->cqh_first = (elm); \
  700. else \
  701. (head)->cqh_last->field.cqe_next = (elm); \
  702. (head)->cqh_last = (elm); \
  703. } while (/*CONSTCOND*/0)
  704. #define CIRCLEQ_REMOVE(head, elm, field) do { \
  705. QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
  706. QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
  707. if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
  708. (head)->cqh_last = (elm)->field.cqe_prev; \
  709. else \
  710. (elm)->field.cqe_next->field.cqe_prev = \
  711. (elm)->field.cqe_prev; \
  712. if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
  713. (head)->cqh_first = (elm)->field.cqe_next; \
  714. else \
  715. (elm)->field.cqe_prev->field.cqe_next = \
  716. (elm)->field.cqe_next; \
  717. QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
  718. } while (/*CONSTCOND*/0)
  719. #define CIRCLEQ_FOREACH(var, head, field) \
  720. for ((var) = ((head)->cqh_first); \
  721. (var) != CIRCLEQ_ENDC(head); \
  722. (var) = ((var)->field.cqe_next))
  723. #define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
  724. for ((var) = ((head)->cqh_last); \
  725. (var) != CIRCLEQ_ENDC(head); \
  726. (var) = ((var)->field.cqe_prev))
  727. /*
  728. * Circular queue access methods.
  729. */
  730. #define CIRCLEQ_FIRST(head) ((head)->cqh_first)
  731. #define CIRCLEQ_LAST(head) ((head)->cqh_last)
  732. /* For comparisons */
  733. #define CIRCLEQ_ENDC(head) (__launder_type(head))
  734. /* For assignments */
  735. #define CIRCLEQ_END(head) ((void *)(head))
  736. #define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
  737. #define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
  738. #define CIRCLEQ_EMPTY(head) \
  739. (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
  740. #define CIRCLEQ_LOOP_NEXT(head, elm, field) \
  741. (((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
  742. ? ((head)->cqh_first) \
  743. : (elm->field.cqe_next))
  744. #define CIRCLEQ_LOOP_PREV(head, elm, field) \
  745. (((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
  746. ? ((head)->cqh_last) \
  747. : (elm->field.cqe_prev))
  748. #endif /* !_KERNEL */
  749. #endif /* !_SYS_QUEUE_H_ */