/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 * (C) Copyright 2015 Hewlett-Packard Enterprise Development LP
 *
 * Authors: Waiman Long <waiman.long@hpe.com>
 */
#ifndef __ASM_GENERIC_QSPINLOCK_H
#define __ASM_GENERIC_QSPINLOCK_H

#include <asm-generic/qspinlock_types.h>

/**
 * queued_spin_unlock_wait - wait until the _current_ lock holder releases the lock
 * @lock : Pointer to queued spinlock structure
 *
 * There is a very slight possibility of live-lock if the lockers keep coming
 * and the waiter is just unfortunate enough to not see any unlock state.
 */
#ifndef queued_spin_unlock_wait
extern void queued_spin_unlock_wait(struct qspinlock *lock);
#endif

/**
 * queued_spin_is_locked - is the spinlock locked?
 * @lock: Pointer to queued spinlock structure
 * Return: 1 if it is locked, 0 otherwise
 */
#ifndef queued_spin_is_locked
static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
{
        /*
         * See queued_spin_unlock_wait().
         *
         * Any !0 state indicates it is locked, even if _Q_LOCKED_VAL
         * isn't immediately observable.
         */
        return atomic_read(&lock->val);
}
#endif
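
/*
 * The lock word packs a locked byte, a pending bit and an MCS wait-queue
 * tail (see asm-generic/qspinlock_types.h for the exact bit layout).  A
 * value of zero therefore means "unlocked, no pending waiter, empty queue",
 * while any non-zero value means the lock is either held or in transit.
 */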

/**
 * queued_spin_value_unlocked - is the spinlock structure unlocked?
 * @lock: queued spinlock structure
 * Return: 1 if it is unlocked, 0 otherwise
 *
 * N.B. Whenever there are tasks waiting for the lock, it is considered
 *      locked wrt the lockref code, to prevent the lockref code from
 *      stealing the lock and changing things underneath it. This also
 *      allows some optimizations to be applied without conflict with
 *      lockref.
 */
static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
{
        return !atomic_read(&lock.val);
}
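
/*
 * Rough sketch of the kind of caller the N.B. above has in mind (modelled
 * on lib/lockref.c; identifiers simplified and not taken from this header):
 * snapshot the combined lock + count word and only attempt a lockless
 * cmpxchg update of the count while the snapshot shows the spinlock as
 * unlocked, falling back to taking the lock otherwise.
 *
 *	old.lock_count = READ_ONCE(lockref->lock_count);
 *	while (arch_spin_value_unlocked(old.lock)) {
 *		new = old;
 *		new.count++;
 *		if (cmpxchg64(&lockref->lock_count, old.lock_count,
 *			      new.lock_count) == old.lock_count)
 *			return;
 *		old.lock_count = READ_ONCE(lockref->lock_count);
 *	}
 *	spin_lock(&lockref->lock);
 *	...
 */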

/**
 * queued_spin_is_contended - check if the lock is contended
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock contended, 0 otherwise
 */
static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
{
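        /*
         * Masking out the locked byte leaves the pending bit and the MCS
         * tail; either being non-zero means at least one other CPU is
         * waiting for (or spinning on) the lock.
         */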
        return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
}

/**
 * queued_spin_trylock - try to acquire the queued spinlock
 * @lock : Pointer to queued spinlock structure
 * Return: 1 if lock acquired, 0 if failed
 */
static __always_inline int queued_spin_trylock(struct qspinlock *lock)
{
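        /*
         * Check the lock word with a plain load first so that the cmpxchg,
         * which needs the cache line in exclusive state, is only attempted
         * when the lock actually looks free.
         */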
        if (!atomic_read(&lock->val) &&
            (atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL) == 0))
                return 1;
        return 0;
}

extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);

/**
 * queued_spin_lock - acquire a queued spinlock
 * @lock: Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_lock(struct qspinlock *lock)
{
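        /*
         * Fast path: a zero lock word (unlocked and uncontended) can be
         * taken with a single acquire cmpxchg; otherwise hand the observed
         * value to the queued slow path.
         */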
        u32 val;

        val = atomic_cmpxchg_acquire(&lock->val, 0, _Q_LOCKED_VAL);
        if (likely(val == 0))
                return;
        queued_spin_lock_slowpath(lock, val);
}

#ifndef queued_spin_unlock
/**
 * queued_spin_unlock - release a queued spinlock
 * @lock : Pointer to queued spinlock structure
 */
static __always_inline void queued_spin_unlock(struct qspinlock *lock)
{
        /*
         * unlock() needs release semantics:
         */
        (void)atomic_sub_return_release(_Q_LOCKED_VAL, &lock->val);
}
#endif
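
/*
 * Architectures that can release the lock more cheaply, e.g. with a plain
 * release store of zero to the locked byte, can provide their own
 * queued_spin_unlock() and #define it before this point so the generic
 * version above is skipped.
 */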

#ifndef virt_spin_lock
static __always_inline bool virt_spin_lock(struct qspinlock *lock)
{
        return false;
}
#endif
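
/*
 * virt_spin_lock() is a hook for virtualized guests: an architecture may
 * override it to handle the lock by other means (e.g. a simple test-and-set
 * loop) when running under a hypervisor, in which case returning true makes
 * the queued slow path bail out early.  The stub above opts out of that.
 */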

/*
 * Remap architecture-specific spinlock functions to the corresponding
 * queued spinlock functions.
 */
#define arch_spin_is_locked(l)          queued_spin_is_locked(l)
#define arch_spin_is_contended(l)       queued_spin_is_contended(l)
#define arch_spin_value_unlocked(l)     queued_spin_value_unlocked(l)
#define arch_spin_lock(l)               queued_spin_lock(l)
#define arch_spin_trylock(l)            queued_spin_trylock(l)
#define arch_spin_unlock(l)             queued_spin_unlock(l)
#define arch_spin_lock_flags(l, f)      queued_spin_lock(l)
#define arch_spin_unlock_wait(l)        queued_spin_unlock_wait(l)

#endif /* __ASM_GENERIC_QSPINLOCK_H */