rtmutex_i.h

/*
 * Copyright (c) 2017 Richard Braun.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef KERN_RTMUTEX_I_H
#define KERN_RTMUTEX_I_H

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#include <kern/atomic.h>
#include <kern/rtmutex_types.h>
#include <kern/thread.h>

/*
 * Real-time mutex flags.
 *
 * The "contended" flag indicates that threads are waiting for the mutex
 * to be unlocked. It forces threads trying to lock the mutex as well as
 * the owner to take the slow path.
 *
 * The "force-wait" flag prevents "stealing" a mutex. When a contended
 * mutex is unlocked, a thread may concurrently try to lock it. Without
 * this flag, it may succeed, and in doing so, it would prevent a
 * potentially higher priority thread from locking the mutex. The flag
 * forces all threads to not only take the slow path, but to also call
 * the turnstile wait function so that only the highest priority thread
 * may lock the mutex.
 */
#define RTMUTEX_CONTENDED   ((uintptr_t)0x1)
#define RTMUTEX_FORCE_WAIT  ((uintptr_t)0x2)

#define RTMUTEX_OWNER_MASK  (~((uintptr_t)(RTMUTEX_FORCE_WAIT \
                                           | RTMUTEX_CONTENDED)))
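
/*
 * Illustrative sketch, not part of the original header: the owner word
 * packs the owning thread's address and the two flag bits above into a
 * single uintptr_t, which keeps the uncontended lock and unlock paths
 * down to one CAS each. Assuming thread structures are at least 4-byte
 * aligned, a hypothetical helper recovering the owner could look like
 * this.
 */
static inline struct thread *
rtmutex_owner_thread_example(uintptr_t owner)
{
    /* Drop the flag bits, keeping only the thread address. */
    return (struct thread *)(owner & RTMUTEX_OWNER_MASK);
}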

/*
 * Return true if the owner word carries no flag bits.
 */
static inline bool
rtmutex_owner_aligned(uintptr_t owner)
{
    return (((owner) & ~RTMUTEX_OWNER_MASK) == 0);
}

/*
 * Fast path: attempt an acquire CAS of the owner word from 0 (unlocked)
 * to the calling thread. Return the previous owner word, 0 on success.
 */
static inline uintptr_t
rtmutex_lock_fast(struct rtmutex *rtmutex)
{
    uintptr_t owner;

    owner = (uintptr_t)thread_self();
    assert(rtmutex_owner_aligned(owner));
    return atomic_cas(&rtmutex->owner, 0, owner, ATOMIC_ACQUIRE);
}

/*
 * Fast path: attempt a release CAS of the owner word from the calling
 * thread (with no flag bits set) back to 0. Return the previous owner
 * word so that the caller can detect contention.
 */
static inline uintptr_t
rtmutex_unlock_fast(struct rtmutex *rtmutex)
{
    uintptr_t owner, prev_owner;

    owner = (uintptr_t)thread_self();
    assert(rtmutex_owner_aligned(owner));
    prev_owner = atomic_cas(&rtmutex->owner, owner, 0, ATOMIC_RELEASE);
    assert((prev_owner & RTMUTEX_OWNER_MASK) == owner);
    return prev_owner;
}

void rtmutex_lock_slow(struct rtmutex *rtmutex);
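
/*
 * Illustrative sketch, not part of the original header (the function
 * name is hypothetical): a lock operation built on the fast path would
 * try the acquire CAS first and fall back to the slow path whenever the
 * previous owner word is non-zero, i.e. the mutex is held or a flag
 * forces the slow path.
 */
static inline void
rtmutex_lock_example(struct rtmutex *rtmutex)
{
    uintptr_t prev_owner;

    prev_owner = rtmutex_lock_fast(rtmutex);

    if (prev_owner != 0) {
        rtmutex_lock_slow(rtmutex);
    }
}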

int rtmutex_timedlock_slow(struct rtmutex *rtmutex, uint64_t ticks);

void rtmutex_unlock_slow(struct rtmutex *rtmutex);
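
/*
 * Illustrative sketch, not part of the original header (the function
 * name is hypothetical): a matching unlock operation would try the fast
 * path first; if the contended flag was observed, the release CAS did
 * not take effect and the slow path is expected to wake a waiter and
 * hand the mutex off.
 */
static inline void
rtmutex_unlock_example(struct rtmutex *rtmutex)
{
    uintptr_t prev_owner;

    prev_owner = rtmutex_unlock_fast(rtmutex);

    if ((prev_owner & RTMUTEX_CONTENDED) != 0) {
        rtmutex_unlock_slow(rtmutex);
    }
}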

#endif /* KERN_RTMUTEX_I_H */