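/*
 * MCS-style "optimistic spin queue" (OSQ): an MCS-like lock in which
 * each waiter spins on its own per-CPU node rather than on a shared
 * cache line, with the added ability to abort the wait (unqueue) when
 * the spinner needs to reschedule.  The kernel's sleeping locks use it
 * to serialize optimistic spinners.
 */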
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/osq_lock.h>
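/*
 * A single queue node per CPU suffices: sleeping locks are not taken
 * from interrupt context, and preemption is disabled while spinning,
 * so a CPU can wait on at most one OSQ at a time.
 */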
static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
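/*
 * lock->tail holds an encoded CPU number; 0 (OSQ_UNLOCKED_VAL) means
 * "no CPU", so real CPU numbers are stored incremented by one.
 */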
static inline int encode_cpu(int cpu_nr)
{
	return cpu_nr + 1;
}

static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
{
	int cpu_nr = encoded_cpu_val - 1;

	return per_cpu_ptr(&osq_node, cpu_nr);
}
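/*
 * Get a stable @node->next pointer for unlock()/unqueue() purposes.
 * Returns NULL if we were the last queued node, in which case we have
 * already moved @lock->tail back to @prev (or to unlocked).
 */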
static inline struct optimistic_spin_node *
osq_wait_next(struct optimistic_spin_queue *lock,
	      struct optimistic_spin_node *node,
	      struct optimistic_spin_node *prev)
{
	struct optimistic_spin_node *next = NULL;
	int curr = encode_cpu(smp_processor_id());
	int old;

	/*
	 * If there is a prev node in the queue, the tail should revert
	 * to it when we turn out to be last; otherwise the queue is empty.
	 */
	old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;

	for (;;) {
		if (atomic_read(&lock->tail) == curr &&
		    atomic_cmpxchg(&lock->tail, curr, old) == curr) {
			/*
			 * We were the last queued node; we moved @lock
			 * back to @prev (or to unlocked), so there is no
			 * successor to wait for.
			 */
			break;
		}

		/*
		 * We must xchg() the @node->next value: were we to leave
		 * it in place, a concurrent unlock()/unqueue() could take
		 * it and we would be left pointing at a stale node.
		 */
		if (node->next) {
			next = xchg(&node->next, NULL);
			if (next)
				break;
		}

		cpu_relax_lowlatency();
	}

	return next;
}
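/*
 * Queue onto @lock and spin until we reach the head of the queue.
 * Returns true once we own the OSQ; returns false if we abandoned the
 * wait because need_resched() was set, after unlinking ourselves.
 */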
bool osq_lock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
	struct optimistic_spin_node *prev, *next;
	int curr = encode_cpu(smp_processor_id());
	int old;

	node->locked = 0;
	node->next = NULL;
	node->cpu = curr;

	/* Publish ourselves as the new tail; an empty queue means we own it. */
	old = atomic_xchg(&lock->tail, curr);
	if (old == OSQ_UNLOCKED_VAL)
		return true;

	/* Link in behind the previous tail. */
	prev = decode_cpu(old);
	node->prev = prev;
	WRITE_ONCE(prev->next, node);

	/*
	 * Spin on our own node until our predecessor hands the OSQ over,
	 * or until the scheduler wants this CPU back.
	 */
	while (!READ_ONCE(node->locked)) {
		if (need_resched())
			goto unqueue;

		cpu_relax_lowlatency();
	}
	return true;

unqueue:
	/*
	 * Step A -- stabilize @prev: undo our @prev->next assignment so
	 * that @prev's unlock()/unqueue() waits for a next pointer, since
	 * @lock->tail points at us (or at a later node).
	 */
	for (;;) {
		if (prev->next == node &&
		    cmpxchg(&prev->next, node, NULL) == node)
			break;

		/*
		 * We can only fail the cmpxchg() racing against an
		 * unlock(), in which case we should observe @node->locked
		 * becoming true.
		 */
		if (smp_load_acquire(&node->locked))
			return true;

		cpu_relax_lowlatency();

		/*
		 * Or we race against a concurrent unqueue()'s step-B, in
		 * which case its step-C will write us a new @node->prev
		 * pointer.
		 */
		prev = READ_ONCE(node->prev);
	}

	/*
	 * Step B -- stabilize @next: as in unlock(), wait for @node->next
	 * or move @lock->tail from @node back to @prev.
	 */
	next = osq_wait_next(lock, node, prev);
	if (!next)
		return false;

	/*
	 * Step C -- unlink: @prev is stable because it is still waiting
	 * for a new @prev->next pointer; @next is stable because our
	 * @node->next pointer is NULL and it will wait in step-A.
	 */
	WRITE_ONCE(next->prev, prev);
	WRITE_ONCE(prev->next, next);

	return false;
}
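/*
 * Release the OSQ: either empty the queue (fast path) or grant the
 * lock to our successor.
 */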
void osq_unlock(struct optimistic_spin_queue *lock)
{
	struct optimistic_spin_node *node, *next;
	int curr = encode_cpu(smp_processor_id());

	/* Fast path for the uncontended case: we are still the tail. */
	if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
		return;

	/*
	 * Second most likely case: a successor has already linked in
	 * behind us; grant it the OSQ.
	 */
	node = this_cpu_ptr(&osq_node);
	next = xchg(&node->next, NULL);
	if (next) {
		WRITE_ONCE(next->locked, 1);
		return;
	}

	/*
	 * Otherwise a successor has published itself in @lock->tail but
	 * has not set node->next yet; wait for it to stabilize (or for
	 * the queue to drain) and hand over.
	 */
	next = osq_wait_next(lock, node, NULL);
	if (next)
		WRITE_ONCE(next->locked, 1);
}
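/*
 * Usage sketch (not part of this file): how a sleeping lock's slowpath
 * might use the OSQ so that only one optimistic spinner at a time
 * touches the lock word itself.  struct my_lock, my_lock_owner_running()
 * and my_trylock() are hypothetical names for illustration only.
 */
static bool my_optimistic_spin(struct my_lock *lock)
{
	/* Only the OSQ owner spins on the lock itself. */
	if (!osq_lock(&lock->osq))
		return false;	/* had to unqueue; go to the sleep path */

	while (my_lock_owner_running(lock)) {
		if (my_trylock(lock)) {
			osq_unlock(&lock->osq);
			return true;
		}

		if (need_resched())
			break;

		cpu_relax_lowlatency();
	}

	osq_unlock(&lock->osq);
	return false;
}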