/*
 * arch/sh/include/asm/mutex-llsc.h
 *
 * SH-4A optimized mutex locking primitives
 *
 * Please look into asm-generic/mutex-xchg.h for a formal definition.
 */
#ifndef __ASM_SH_MUTEX_LLSC_H
#define __ASM_SH_MUTEX_LLSC_H

/*
 * Attempting to lock a mutex on SH-4A is done as on ARMv6+ architectures,
 * with a bastardized atomic decrement (it is not a reliable atomic
 * decrement, but it satisfies the defined semantics for our purpose while
 * being smaller and faster than a real atomic decrement or atomic swap).
 * The idea is to attempt the decrement of the lock value only once. If,
 * once decremented, it isn't zero, or if its store-back fails due to a
 * dispute on the exclusive store, we simply bail out immediately through
 * the slow path, where the lock will be reattempted until it succeeds.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"	/* load-linked: %0 = count */
		"add		#-1, %0	\n"	/* decrement the loaded value */
		"movco.l	%0, @%2	\n"	/* store-conditional back to count */
		"movt		%1	\n"	/* T bit -> __done (1 = store succeeded) */
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	/* Take the slow path if the store failed or the new count is non-zero. */
	if (unlikely(!__done || __res != 0))
		fail_fn(count);
}
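
/*
 * Example (illustrative sketch, not a definition): the generic mutex layer
 * is expected to drive this fastpath roughly as below, with the count
 * following the usual convention of 1 = unlocked, 0 = locked, and a
 * negative value = locked with possible waiters. The caller and slowpath
 * names are assumptions made for the sketch:
 *
 *	void mutex_lock(struct mutex *lock)
 *	{
 *		might_sleep();
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 */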

static inline int
__mutex_fastpath_lock_retval(atomic_t *count)
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n"	/* load-linked: %0 = count */
		"add		#-1, %0	\n"	/* decrement the loaded value */
		"movco.l	%0, @%2	\n"	/* store-conditional back to count */
		"movt		%1	\n"	/* T bit -> __done (1 = store succeeded) */
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	/* Report -1 if the store failed or the new count is non-zero. */
	if (unlikely(!__done || __res != 0))
		__res = -1;

	return __res;
}
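
/*
 * Example (illustrative sketch): the retval variant returns 0 on success
 * and -1 on failure, letting the caller pick its own slowpath. A possible
 * interruptible lock built on top of it (the names are assumptions):
 *
 *	int mutex_lock_interruptible(struct mutex *lock)
 *	{
 *		might_sleep();
 *		if (likely(!__mutex_fastpath_lock_retval(&lock->count)))
 *			return 0;
 *		return __mutex_lock_interruptible_slowpath(lock);
 *	}
 */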

static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	int __done, __res;

	__asm__ __volatile__ (
		"movli.l	@%2, %0	\n\t"	/* load-linked: %0 = count */
		"add		#1, %0	\n\t"	/* increment the loaded value */
		"movco.l	%0, @%2	\n\t"	/* store-conditional back to count */
		"movt		%1	\n\t"	/* T bit -> __done (1 = store succeeded) */
		: "=&z" (__res), "=&r" (__done)
		: "r" (&(count)->counter)
		: "t");

	/* Take the slow path if the store failed or waiters may be present. */
	if (unlikely(!__done || __res <= 0))
		fail_fn(count);
}

/*
 * If the unlock was done on a contended lock, or if the unlock simply
 * fails, then the mutex remains locked.
 */
#define __mutex_slowpath_needs_to_unlock()	1
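
/*
 * Example (illustrative sketch): because __mutex_slowpath_needs_to_unlock()
 * is 1, the unlock slowpath is expected to release the lock itself whenever
 * the fastpath bails out. A possible caller (the names are assumptions):
 *
 *	void mutex_unlock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *	}
 */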

/*
 * For __mutex_fastpath_trylock we do an atomic decrement, check the
 * result, and put it in the __res variable.
 */
static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	int __res, __orig;

	__asm__ __volatile__ (
		"1: movli.l	@%2, %0	\n\t"	/* load-linked: %0 = count */
		"dt		%0	\n\t"	/* decrement; T = 1 if result is zero */
		"movco.l	%0, @%2	\n\t"	/* store-conditional back to count */
		"bf		1b	\n\t"	/* retry if the store failed */
		"cmp/eq		#0, %0	\n\t"	/* did the count reach zero? */
		"bt		2f	\n\t"	/* yes: lock acquired */
		"mov		#0, %1	\n\t"	/* no: __res = 0 (trylock failed) */
		"bf		3f	\n\t"	/* skip over the success case */
		"2: mov		#1, %1	\n\t"	/* __res = 1 (lock acquired) */
		"3: "
		: "=&z" (__orig), "=&r" (__res)
		: "r" (&count->counter)
		: "t");

	return __res;
}
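
/*
 * Example (illustrative sketch): the trylock fastpath returns 1 when the
 * lock was taken and 0 otherwise, and never calls fail_fn. A possible
 * caller (the names are assumptions):
 *
 *	int mutex_trylock(struct mutex *lock)
 *	{
 *		return __mutex_fastpath_trylock(&lock->count,
 *						__mutex_trylock_slowpath);
 *	}
 */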

#endif /* __ASM_SH_MUTEX_LLSC_H */