- /*
- * Queued spinlock
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
- *
- * Authors: Waiman Long <waiman.long@hp.com>
- */
- #ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
- #define __ASM_GENERIC_QSPINLOCK_TYPES_H
- /*
- * Including atomic.h with PARAVIRT on will cause compilation errors because
- * of recursive header file inclusion via paravirt_types.h. So don't include
- * it if PARAVIRT is on.
- */
- #ifndef CONFIG_PARAVIRT
- #include <linux/types.h>
- #include <linux/atomic.h>
- #endif
- typedef struct qspinlock {
- 	union {
- 		atomic_t val;
- 		/*
- 		 * By using the whole 2nd least significant byte for the
- 		 * pending bit, we can allow better optimization of the lock
- 		 * acquisition for the pending bit holder.
- 		 */
- #ifdef __LITTLE_ENDIAN
- 		struct {
- 			u8	locked;
- 			u8	pending;
- 		};
- 		struct {
- 			u16	locked_pending;
- 			u16	tail;
- 		};
- #else
- 		struct {
- 			u16	tail;
- 			u16	locked_pending;
- 		};
- 		struct {
- 			u8	reserved[2];
- 			u8	pending;
- 			u8	locked;
- 		};
- #endif
- 	};
- } arch_spinlock_t;
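- /*
- * Illustration of the union overlay above, assuming a little-endian
- * machine: the byte and halfword views alias the same 32-bit atomic
- * value, so e.g. a raw val of 0x00040101 reads back as locked == 0x01,
- * pending == 0x01, locked_pending == 0x0101 and tail == 0x0004.
- */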
- /*
- * Initializer
- */
- #define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
- /*
- * Bitfields in the atomic value:
- *
- * When NR_CPUS < 16K
- * 0- 7: locked byte
- * 8: pending
- * 9-15: not used
- * 16-17: tail index
- * 18-31: tail cpu (+1)
- *
- * When NR_CPUS >= 16K
- * 0- 7: locked byte
- * 8: pending
- * 9-10: tail index
- * 11-31: tail cpu (+1)
- */
- #define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
- 				<< _Q_ ## type ## _OFFSET)
- #define _Q_LOCKED_OFFSET 0
- #define _Q_LOCKED_BITS 8
- #define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
- #define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
- #if CONFIG_NR_CPUS < (1U << 14)
- #define _Q_PENDING_BITS 8
- #else
- #define _Q_PENDING_BITS 1
- #endif
- #define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
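- /*
- * Worked example of _Q_SET_MASK(): _Q_LOCKED_MASK always expands to
- * 0xff, while _Q_PENDING_MASK is 0xff00 when _Q_PENDING_BITS == 8
- * (NR_CPUS < 16K) and 0x100 when _Q_PENDING_BITS == 1.
- */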
- #define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
- #define _Q_TAIL_IDX_BITS 2
- #define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
- #define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
- #define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
- #define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
- #define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
- #define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
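- /*
- * Example tail encoding, assuming NR_CPUS < 16K (_Q_TAIL_IDX_OFFSET ==
- * 16, _Q_TAIL_CPU_OFFSET == 18): following the "tail cpu (+1)" layout
- * documented above, a tail for CPU 3 at tail index 1 would be
- * ((3 + 1) << _Q_TAIL_CPU_OFFSET) | (1 << _Q_TAIL_IDX_OFFSET), i.e.
- * 0x00110000.
- */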
- #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
- #define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
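- /*
- * Putting it together (NR_CPUS < 16K case): _Q_LOCKED_VAL | _Q_PENDING_VAL
- * == 0x101 describes a lock that is held with a waiter spinning on the
- * pending bit; OR-ing in the example tail above gives 0x00110101.
- */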
- #endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */