adaptive_lock.h

/*
 * Copyright (c) 2017 Agustina Arzille.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */
#ifndef KERN_ADAPTIVE_LOCK_I_H
#define KERN_ADAPTIVE_LOCK_I_H

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

#include <kern/atomic.h>
#include <kern/init.h>
#include <kern/macros.h>
#include <kern/thread.h>

struct adaptive_lock
{
  uintptr_t owner;
};
/*
 * Adaptive lock flags.
 *
 * The "contended" flag indicates that threads are waiting for the lock
 * to be released, potentially spinning on the owner. It forces threads
 * trying to acquire the lock as well as the owner to take the slow path.
 */
#define ADAPTIVE_LOCK_CONTENDED   0x1UL
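
/*
 * The flag lives in the low bit of the owner word, next to the owner
 * thread pointer, which works as long as thread structures are at least
 * word aligned (an assumption of this sketch, not something this header
 * checks). The resulting encoding:
 *
 *   0                                : unlocked
 *   (uintptr_t)owner_thread          : locked, no waiters
 *   (uintptr_t)owner_thread | 0x1    : locked, waiters present
 */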

static inline void
adaptive_lock_init (struct adaptive_lock *lock)
{
  lock->owner = 0;
}

/*
 * Try to acquire the lock without blocking.
 *
 * Return 0 on success, EBUSY if the lock is already owned. The CAS
 * uses acquire ordering, pairing with the release ordering of the
 * CAS in adaptive_lock_release_fast.
 */
static inline int
adaptive_lock_tryacq (struct adaptive_lock *lock)
{
  uintptr_t owner = atomic_cas_acq (&lock->owner, 0,
                                    (uintptr_t)thread_self ());
  return (owner ? EBUSY : 0);
}
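
/*
 * Usage sketch, for illustration only (obj_lock is a hypothetical lock,
 * not part of this header):
 *
 *   if (adaptive_lock_tryacq (&obj_lock) == 0)
 *     {
 *       ...   // critical section
 *       adaptive_lock_release (&obj_lock);
 *     }
 *
 * Note that the release must go through adaptive_lock_release, since
 * waiters may have set the contended flag after the acquisition.
 */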

#define adaptive_lock_acquire_fast   adaptive_lock_tryacq

/*
 * Try to release the lock with a single CAS (release ordering).
 *
 * Return 0 on success, EBUSY if the contended flag is set, in which
 * case the slow path must be taken so that waiters can be woken.
 */
static inline int
adaptive_lock_release_fast (struct adaptive_lock *lock)
{
  uintptr_t prev = atomic_cas_rel (&lock->owner, (uintptr_t)thread_self (), 0);
  return ((prev & ADAPTIVE_LOCK_CONTENDED) ? EBUSY : 0);
}

void adaptive_lock_acquire_slow (struct adaptive_lock *lock);
void adaptive_lock_release_slow (struct adaptive_lock *lock);
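
/*
 * The "adaptive" behavior lives in the slow path, implemented in the
 * corresponding source file: a waiter keeps spinning while the owner is
 * running on a processor, since the lock is then likely to be released
 * soon, and sleeps otherwise. A minimal, schematic sketch of the acquire
 * side of that idea; atomic_or (), thread_is_running (), cpu_pause ()
 * and the sleeping step stand in for the actual primitives and are not
 * guaranteed to match this kernel's interface:
 *
 *   // Announce contention, forcing the owner onto the slow path too.
 *   atomic_or (&lock->owner, ADAPTIVE_LOCK_CONTENDED);
 *
 *   for (;;)
 *     {
 *       // The lock is free when only the contended flag remains set.
 *       uintptr_t owner = atomic_cas_acq (&lock->owner,
 *                                         ADAPTIVE_LOCK_CONTENDED,
 *                                         (uintptr_t)thread_self ()
 *                                         | ADAPTIVE_LOCK_CONTENDED);
 *
 *       if (owner == ADAPTIVE_LOCK_CONTENDED)
 *         break;    // acquired
 *
 *       struct thread *thr = (struct thread *)
 *                            (owner & ~ADAPTIVE_LOCK_CONTENDED);
 *
 *       if (thr && thread_is_running (thr))
 *         cpu_pause ();   // owner is on a CPU: spin, release is imminent
 *       else
 *         ;               // sleep until the owner's slow path wakes us
 *     }
 */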

static inline void
adaptive_lock_acquire (struct adaptive_lock *lock)
{
  if (unlikely (adaptive_lock_acquire_fast (lock) != 0))
    adaptive_lock_acquire_slow (lock);
}

static inline void
adaptive_lock_release (struct adaptive_lock *lock)
{
  if (unlikely (adaptive_lock_release_fast (lock) != 0))
    adaptive_lock_release_slow (lock);
}
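
/*
 * Usage sketch, for illustration only (obj_lock is hypothetical):
 *
 *   adaptive_lock_acquire (&obj_lock);
 *   ...   // critical section
 *   adaptive_lock_release (&obj_lock);
 *
 * As the fast paths above imply, the lock isn't recursive: the owner
 * word holds a single thread pointer, and the release CAS expects the
 * calling thread to be that owner.
 */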

// Adaptive lock guards.

/*
 * CLEANUP callback: ptr points to the guard variable, which stores the
 * address of the guarded lock.
 */
static inline void
adaptive_lock_guard_fini (void *ptr)
{
  adaptive_lock_release (*(struct adaptive_lock **)ptr);
}

#define ADAPTIVE_LOCK_GUARD(lock)   \
  CLEANUP (adaptive_lock_guard_fini) __unused _Auto UNIQ(alg) =   \
    ({   \
       struct adaptive_lock *lock_ = (lock);   \
       adaptive_lock_acquire (lock_);   \
       lock_;   \
     })
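
/*
 * Guard usage sketch, for illustration only (the names are hypothetical,
 * not part of this header):
 *
 *   static struct adaptive_lock obj_lock;
 *
 *   void
 *   obj_update (void)
 *   {
 *     ADAPTIVE_LOCK_GUARD (&obj_lock);
 *     ...   // critical section
 *   }       // the CLEANUP attribute releases the lock on scope exit
 */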

#endif