rculist_bl.h — RCU-protected bit-locked hlist primitives (include/linux/rculist_bl.h), 4.4 KB

(line-number residue from page extraction removed)
  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef _LINUX_RCULIST_BL_H
  3. #define _LINUX_RCULIST_BL_H
  4. /*
  5. * RCU-protected bl list version. See include/linux/list_bl.h.
  6. */
  7. #include <linux/list_bl.h>
  8. #include <linux/rcupdate.h>
/**
 * hlist_bl_set_first_rcu - publish @n as the first node of a locked list
 * @h: the bit-locked list head (lowest bit of h->first is the lock bit).
 * @n: the node to install as the first element.
 *
 * Caller must hold the head's bit lock: the second BUG_ON checks that the
 * lock bit is currently set in h->first, and the first checks that @n is
 * sufficiently aligned that its low bit(s) are free to carry the lock.
 * The store goes through rcu_assign_pointer() so concurrent lock-free
 * readers observe a fully initialised node, and the lock bit is OR-ed
 * back into the new pointer so the head stays locked across the update.
 */
static inline void hlist_bl_set_first_rcu(struct hlist_bl_head *h,
					struct hlist_bl_node *n)
{
	LIST_BL_BUG_ON((unsigned long)n & LIST_BL_LOCKMASK);
	LIST_BL_BUG_ON(((unsigned long)h->first & LIST_BL_LOCKMASK) !=
							LIST_BL_LOCKMASK);
	rcu_assign_pointer(h->first,
		(struct hlist_bl_node *)((unsigned long)n | LIST_BL_LOCKMASK));
}
/**
 * hlist_bl_first_rcu - dereference the first node of the list
 * @h: the bit-locked list head.
 *
 * Returns the first node with the lock bit masked off.  The
 * rcu_dereference_check() permits the access either inside an RCU read
 * section or when the caller holds the head's bit lock
 * (hlist_bl_is_locked(h)).
 */
static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
{
	return (struct hlist_bl_node *)
		((unsigned long)rcu_dereference_check(h->first, hlist_bl_is_locked(h)) & ~LIST_BL_LOCKMASK);
}
  23. /**
  24. * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
  25. * @n: the element to delete from the hash list.
  26. *
  27. * Note: hlist_bl_unhashed() on the node returns true after this. It is
  28. * useful for RCU based read lockfree traversal if the writer side
  29. * must know if the list entry is still hashed or already unhashed.
  30. *
  31. * In particular, it means that we can not poison the forward pointers
  32. * that may still be used for walking the hash list and we can only
  33. * zero the pprev pointer so list_unhashed() will return true after
  34. * this.
  35. *
  36. * The caller must take whatever precautions are necessary (such as
  37. * holding appropriate locks) to avoid racing with another
  38. * list-mutation primitive, such as hlist_bl_add_head_rcu() or
  39. * hlist_bl_del_rcu(), running on this same list. However, it is
  40. * perfectly legal to run concurrently with the _rcu list-traversal
  41. * primitives, such as hlist_bl_for_each_entry_rcu().
  42. */
  43. static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
  44. {
  45. if (!hlist_bl_unhashed(n)) {
  46. __hlist_bl_del(n);
  47. n->pprev = NULL;
  48. }
  49. }
/**
 * hlist_bl_del_rcu - deletes entry from hash list without re-initialization
 * @n: the element to delete from the hash list.
 *
 * Note: hlist_bl_unhashed() on entry does not return true after this,
 * the entry is in an undefined state. It is useful for RCU based
 * lockfree traversal.
 *
 * In particular, it means that we can not poison the forward
 * pointers that may still be used for walking the hash list.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry().
 */
static inline void hlist_bl_del_rcu(struct hlist_bl_node *n)
{
	__hlist_bl_del(n);
	/*
	 * Only pprev may be poisoned: n->next must remain valid because
	 * concurrent lock-free readers may still be walking through @n.
	 */
	n->pprev = LIST_POISON2;
}
/**
 * hlist_bl_add_head_rcu
 * @n: the element to add to the hash list.
 * @h: the list to add to.
 *
 * Description:
 * Adds the specified element to the specified hlist_bl,
 * while permitting racing traversals.
 *
 * The caller must take whatever precautions are necessary
 * (such as holding appropriate locks) to avoid racing
 * with another list-mutation primitive, such as hlist_bl_add_head_rcu()
 * or hlist_bl_del_rcu(), running on this same list.
 * However, it is perfectly legal to run concurrently with
 * the _rcu list-traversal primitives, such as
 * hlist_bl_for_each_entry_rcu(), used to prevent memory-consistency
 * problems on Alpha CPUs. Regardless of the type of CPU, the
 * list-traversal primitive must be guarded by rcu_read_lock().
 */
static inline void hlist_bl_add_head_rcu(struct hlist_bl_node *n,
					struct hlist_bl_head *h)
{
	struct hlist_bl_node *first;

	/* don't need hlist_bl_first_rcu because we're under lock */
	first = hlist_bl_first(h);

	/*
	 * Fully link @n into the list BEFORE publishing it as the head:
	 * readers must never reach a half-initialised node.
	 */
	n->next = first;
	if (first)
		first->pprev = &n->next;
	n->pprev = &h->first;

	/* need _rcu because we can have concurrent lock free readers */
	hlist_bl_set_first_rcu(h, n);
}
/**
 * hlist_bl_for_each_entry_rcu - iterate over rcu list of given type
 * @tpos: the type * to use as a loop cursor.
 * @pos: the &struct hlist_bl_node to use as a loop cursor.
 * @head: the head for your list.
 * @member: the name of the hlist_bl_node within the struct.
 *
 * The first dereference goes through hlist_bl_first_rcu() (which strips
 * the head's lock bit); subsequent nodes are fetched with
 * rcu_dereference_raw() on pos->next.  The statement expression assigns
 * @tpos from @pos and yields 1 so the condition stays true while @pos
 * is non-NULL.
 */
#define hlist_bl_for_each_entry_rcu(tpos, pos, head, member)		\
	for (pos = hlist_bl_first_rcu(head);				\
		pos &&							\
		({ tpos = hlist_bl_entry(pos, typeof(*tpos), member); 1; }); \
		pos = rcu_dereference_raw(pos->next))
  119. #endif