/*
 * Queued spinlock
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
 *
 * Authors: Waiman Long <waiman.long@hp.com>
 */
  18. #ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
  19. #define __ASM_GENERIC_QSPINLOCK_TYPES_H
  20. /*
  21. * Including atomic.h with PARAVIRT on will cause compilation errors because
  22. * of recursive header file incluson via paravirt_types.h. So don't include
  23. * it if PARAVIRT is on.
  24. */
  25. #ifndef CONFIG_PARAVIRT
  26. #include <linux/types.h>
  27. #include <linux/atomic.h>
  28. #endif
  29. typedef struct qspinlock {
  30. union {
  31. atomic_t val;
  32. /*
  33. * By using the whole 2nd least significant byte for the
  34. * pending bit, we can allow better optimization of the lock
  35. * acquisition for the pending bit holder.
  36. */
  37. #ifdef __LITTLE_ENDIAN
  38. struct {
  39. u8 locked;
  40. u8 pending;
  41. };
  42. struct {
  43. u16 locked_pending;
  44. u16 tail;
  45. };
  46. #else
  47. struct {
  48. u16 tail;
  49. u16 locked_pending;
  50. };
  51. struct {
  52. u8 reserved[2];
  53. u8 pending;
  54. u8 locked;
  55. };
  56. #endif
  57. };
  58. } arch_spinlock_t;
  59. /*
  60. * Initializier
  61. */
  62. #define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
  63. /*
  64. * Bitfields in the atomic value:
  65. *
  66. * When NR_CPUS < 16K
  67. * 0- 7: locked byte
  68. * 8: pending
  69. * 9-15: not used
  70. * 16-17: tail index
  71. * 18-31: tail cpu (+1)
  72. *
  73. * When NR_CPUS >= 16K
  74. * 0- 7: locked byte
  75. * 8: pending
  76. * 9-10: tail index
  77. * 11-31: tail cpu (+1)
  78. */
  79. #define _Q_SET_MASK(type) (((1U << _Q_ ## type ## _BITS) - 1)\
  80. << _Q_ ## type ## _OFFSET)
  81. #define _Q_LOCKED_OFFSET 0
  82. #define _Q_LOCKED_BITS 8
  83. #define _Q_LOCKED_MASK _Q_SET_MASK(LOCKED)
  84. #define _Q_PENDING_OFFSET (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
  85. #if CONFIG_NR_CPUS < (1U << 14)
  86. #define _Q_PENDING_BITS 8
  87. #else
  88. #define _Q_PENDING_BITS 1
  89. #endif
  90. #define _Q_PENDING_MASK _Q_SET_MASK(PENDING)
  91. #define _Q_TAIL_IDX_OFFSET (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
  92. #define _Q_TAIL_IDX_BITS 2
  93. #define _Q_TAIL_IDX_MASK _Q_SET_MASK(TAIL_IDX)
  94. #define _Q_TAIL_CPU_OFFSET (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
  95. #define _Q_TAIL_CPU_BITS (32 - _Q_TAIL_CPU_OFFSET)
  96. #define _Q_TAIL_CPU_MASK _Q_SET_MASK(TAIL_CPU)
  97. #define _Q_TAIL_OFFSET _Q_TAIL_IDX_OFFSET
  98. #define _Q_TAIL_MASK (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
  99. #define _Q_LOCKED_VAL (1U << _Q_LOCKED_OFFSET)
  100. #define _Q_PENDING_VAL (1U << _Q_PENDING_OFFSET)
#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */