sxlock.h

/*
 * Copyright (c) 2022 Agustina Arzille.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 *
 * Shared-Exclusive locks.
 */

#ifndef KERN_SXLOCK_H
#define KERN_SXLOCK_H 1

#include <errno.h>   // For EBUSY, returned by the trylock variants below.
#include <stdint.h>

#include <kern/atomic.h>
#include <kern/macros.h>   // Assumed location of CLEANUP, UNIQ, _Auto and unlikely.

/*
 * A shared-exclusive lock is a single 32-bit word: the low 31 bits count
 * shared owners (all of them set meaning the lock is held exclusively),
 * and the top bit tells whether there are waiters.
 */
struct sxlock
{
  uint32_t lock;
};

#define SXLOCK_WAITERS_BIT   31
#define SXLOCK_WAITERS       (1u << SXLOCK_WAITERS_BIT)
#define SXLOCK_MASK          (SXLOCK_WAITERS - 1)

static inline void
sxlock_init (struct sxlock *sxp)
{
  sxp->lock = 0;
}

// Try to acquire the lock exclusively. Returns 0 on success, EBUSY otherwise.
static inline int
sxlock_tryexlock (struct sxlock *sxp)
{
  return (atomic_load_rlx (&sxp->lock) == 0 &&
          atomic_cas_bool_acq (&sxp->lock, 0, SXLOCK_MASK) ? 0 : EBUSY);
}
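
/*
 * Usage sketch (illustrative, not part of this header; obj_lock and the
 * helpers are hypothetical): the trylock variants never block, returning
 * EBUSY when the lock can't be taken immediately.
 *
 *   if (sxlock_tryexlock (&obj_lock) == 0)
 *     {
 *       update_object ();          // Exclusive critical section.
 *       sxlock_unlock (&obj_lock);
 *     }
 *   else
 *     do_other_work ();            // Lock busy; don't wait for it.
 */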

void sxlock_exlock_slow (struct sxlock *sxp);

// Acquire the lock exclusively, blocking until it becomes available.
static inline void
sxlock_exlock (struct sxlock *sxp)
{
  if (sxlock_tryexlock (sxp) != 0)
    sxlock_exlock_slow (sxp);
}

// Try to acquire the lock shared. Returns 0 on success, EBUSY otherwise.
static inline int
sxlock_tryshlock (struct sxlock *sxp)
{
  uint32_t val = atomic_load_rlx (&sxp->lock);
  return ((val & SXLOCK_MASK) != SXLOCK_MASK &&
          atomic_cas_bool_acq (&sxp->lock, val, val + 1) ? 0 : EBUSY);
}

void sxlock_shlock_slow (struct sxlock *sxp);

// Acquire the lock shared, blocking until it becomes available.
static inline void
sxlock_shlock (struct sxlock *sxp)
{
  if (unlikely (sxlock_tryshlock (sxp) != 0))
    sxlock_shlock_slow (sxp);
}

void sxlock_unlock (struct sxlock *sxp);   // Release a shared or exclusive lock.
void sxlock_wake (struct sxlock *sxp);     // Wake threads waiting on the lock.
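
/*
 * Usage sketch (illustrative; obj_lock and obj_value are hypothetical):
 * any number of readers may hold the lock concurrently, while a writer
 * excludes both readers and other writers.
 *
 *   static struct sxlock obj_lock;
 *   static uint64_t obj_value;
 *
 *   static uint64_t
 *   obj_read (void)
 *   {
 *     sxlock_shlock (&obj_lock);    // Shared with other readers.
 *     uint64_t val = obj_value;
 *     sxlock_unlock (&obj_lock);
 *     return (val);
 *   }
 *
 *   static void
 *   obj_write (uint64_t val)
 *   {
 *     sxlock_exlock (&obj_lock);    // Excludes readers and writers.
 *     obj_value = val;
 *     sxlock_unlock (&obj_lock);
 *   }
 */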

// Downgrade an exclusive lock into a shared one.
static inline void
sxlock_share (struct sxlock *sxp)
{
  // Clear every owner bit but one, preserving the waiters bit.
  uint32_t prev = atomic_and_rel (&sxp->lock, SXLOCK_WAITERS | 1);
  if (prev & SXLOCK_WAITERS)
    sxlock_wake (sxp);
}
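
/*
 * Downgrade sketch (illustrative; the helpers are hypothetical): a writer
 * that only needs read access after publishing its update can downgrade
 * in place instead of releasing and reacquiring the lock.
 *
 *   sxlock_exlock (&obj_lock);
 *   obj_value = compute_new_value ();
 *   sxlock_share (&obj_lock);       // Other readers may now enter.
 *   log_value (obj_value);          // Read-side work under the shared lock.
 *   sxlock_unlock (&obj_lock);
 */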

/*
 * Shared-Exclusive lock guards: acquire the lock at the point of
 * declaration and release it automatically when the enclosing scope
 * is left, through the cleanup attribute behind the CLEANUP macro.
 */

static inline void
sxlock_guard_fini (void *ptr)
{
  sxlock_unlock (*(struct sxlock **)ptr);
}

#define SXLOCK_GUARD_IMPL(sxp, fn)   \
  CLEANUP (sxlock_guard_fini) __unused _Auto UNIQ(sxg) =   \
    ({   \
       struct sxlock *sxp_ = (sxp);   \
       fn (sxp_);   \
       sxp_;   \
     })

#define SXLOCK_SHGUARD(sxp)   SXLOCK_GUARD_IMPL (sxp, sxlock_shlock)
#define SXLOCK_EXGUARD(sxp)   SXLOCK_GUARD_IMPL (sxp, sxlock_exlock)
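
/*
 * Guard sketch (illustrative; obj_lock and obj_value are hypothetical):
 * the lock is released on every path out of the scope, including early
 * returns.
 *
 *   static uint64_t
 *   obj_read_guarded (void)
 *   {
 *     SXLOCK_SHGUARD (&obj_lock);   // Unlocked when the scope is left.
 *     return (obj_value);
 *   }
 */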

#endif