/*
 * Copyright (C) 2015 Regents of the University of California
 * Copyright (C) 2017 SiFive
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#ifndef _ASM_RISCV_SPINLOCK_H
#define _ASM_RISCV_SPINLOCK_H

#include <linux/kernel.h>
#include <asm/current.h>
#include <asm/fence.h>

/*
 * Simple spin lock operations.  These provide no fairness guarantees.
 */

/* FIXME: Replace this with a ticket lock, like MIPS. */

#define arch_spin_is_locked(x)	(READ_ONCE((x)->lock) != 0)

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
	/* Release ordering: critical-section accesses stay before the store. */
	smp_store_release(&lock->lock, 0);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
	int tmp = 1, busy;

	__asm__ __volatile__ (
		/* Atomically swap 1 into the lock word; old value lands in busy. */
		"	amoswap.w %0, %2, %1\n"
		RISCV_ACQUIRE_BARRIER
		: "=r" (busy), "+A" (lock->lock)
		: "r" (tmp)
		: "memory");

	return !busy;
}
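
/*
 * For illustration only (a sketch, not part of this header): the
 * sequence above behaves like an atomic exchange with acquire
 * ordering, roughly what the GCC builtin would express as
 *
 *	int busy = __atomic_exchange_n(&lock->lock, 1, __ATOMIC_ACQUIRE);
 *	return !busy;
 *
 * Unconditionally swapping 1 in is safe: if the lock was already held,
 * the old value was 1, and storing 1 again changes nothing.
 */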

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
	while (1) {
		/* Spin on a plain read first to avoid hammering the AMO. */
		if (arch_spin_is_locked(lock))
			continue;

		if (arch_spin_trylock(lock))
			break;
	}
}
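
/*
 * Minimal usage sketch (illustrative only; kernel code normally goes
 * through the generic spin_lock()/spin_unlock() wrappers rather than
 * calling these arch_ primitives directly):
 *
 *	arch_spinlock_t my_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 *
 *	arch_spin_lock(&my_lock);
 *	... critical section ...
 *	arch_spin_unlock(&my_lock);
 */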

/***********************************************************/
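
/*
 * Reader/writer locks.  The lock word encodes the state: 0 means
 * unlocked, a positive value is the number of active readers, and -1
 * means a writer holds the lock.  Readers spin while the word is
 * negative; writers spin until it reaches exactly 0.
 */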

static inline void arch_read_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		/* Retry while a writer holds the lock (negative word). */
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1b\n"
		/* Bump the reader count; retry if the sc.w loses the race. */
		"	addi	%1, %1, 1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}

static inline void arch_write_lock(arch_rwlock_t *lock)
{
	int tmp;

	__asm__ __volatile__(
		/* Wait until the lock word is 0 (no readers, no writer). */
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1b\n"
		/* Claim exclusive ownership by storing -1. */
		"	li	%1, -1\n"
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		: "+A" (lock->lock), "=&r" (tmp)
		:: "memory");
}
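
/*
 * For illustration (a sketch using GCC builtins, not kernel code): the
 * lr.w/sc.w loop in arch_write_lock amounts to a compare-and-swap
 * retry loop, roughly
 *
 *	unsigned int old;
 *	do {
 *		old = 0;
 *	} while (!__atomic_compare_exchange_n(&lock->lock, &old, -1, 0,
 *					      __ATOMIC_ACQUIRE,
 *					      __ATOMIC_RELAXED));
 *
 * where old is reset each iteration because the builtin writes the
 * observed value back on failure.
 */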

static inline int arch_read_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		/* Give up immediately if a writer holds the lock... */
		"1:	lr.w	%1, %0\n"
		"	bltz	%1, 1f\n"
		"	addi	%1, %1, 1\n"
		/* ...but retry if only the sc.w reservation was lost. */
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}

static inline int arch_write_trylock(arch_rwlock_t *lock)
{
	int busy;

	__asm__ __volatile__(
		/* Fail fast unless the lock word is 0. */
		"1:	lr.w	%1, %0\n"
		"	bnez	%1, 1f\n"
		"	li	%1, -1\n"
		/* Retry only if the sc.w reservation was lost. */
		"	sc.w	%1, %1, %0\n"
		"	bnez	%1, 1b\n"
		RISCV_ACQUIRE_BARRIER
		"1:\n"
		: "+A" (lock->lock), "=&r" (busy)
		:: "memory");

	return !busy;
}
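
/*
 * Trylock usage sketch (illustrative only): a caller that must not
 * spin can do other work while the lock is contended:
 *
 *	if (arch_write_trylock(&my_rwlock)) {
 *		... exclusive access ...
 *		arch_write_unlock(&my_rwlock);
 *	}
 */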

static inline void arch_read_unlock(arch_rwlock_t *lock)
{
	__asm__ __volatile__(
		/* Release barrier, then atomically drop the reader count. */
		RISCV_RELEASE_BARRIER
		"	amoadd.w x0, %1, %0\n"
		: "+A" (lock->lock)
		: "r" (-1)
		: "memory");
}
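
/*
 * For illustration (a sketch with GCC builtins): the barrier plus
 * amoadd.w above act as an atomic decrement with release ordering,
 * roughly
 *
 *	(void)__atomic_fetch_add(&lock->lock, -1, __ATOMIC_RELEASE);
 *
 * The x0 destination register discards the fetched old value.
 */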

static inline void arch_write_unlock(arch_rwlock_t *lock)
{
	smp_store_release(&lock->lock, 0);
}

#endif /* _ASM_RISCV_SPINLOCK_H */