atomic_32.c

/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/atomic.h>
#include <arch/chip.h>

/* This page is remapped on startup to be hash-for-home. */
int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;

int *__atomic_hashed_lock(volatile void *v)
{
	/* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
	/*
	 * Use bits [3, 3 + ATOMIC_HASH_SHIFT) as the lock index.
	 * Using mm works here because atomic_locks is page aligned.
	 */
	unsigned long ptr = __insn_mm((unsigned long)v >> 1,
				      (unsigned long)atomic_locks,
				      2, (ATOMIC_HASH_SHIFT + 2) - 1);
	return (int *)ptr;
}
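
/*
 * Illustrative sketch (an added note, not part of the original source):
 * __insn_mm is the TILEPro "masked merge" instruction, so the call above
 * splices bits [3, 3 + ATOMIC_HASH_SHIFT) of the address into the
 * page-aligned base of atomic_locks.  Assuming ATOMIC_HASH_SIZE is
 * (1 << ATOMIC_HASH_SHIFT), a portable equivalent would look roughly like
 * the hypothetical helper below; it is unused and exists only to document
 * the hash.
 */
static inline int *__atomic_hashed_lock_sketch(volatile void *v)
{
	unsigned long idx = ((unsigned long)v >> 3) & (ATOMIC_HASH_SIZE - 1);

	return &atomic_locks[idx];
}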

#ifdef CONFIG_SMP
/* Return whether the passed pointer is a valid atomic lock pointer. */
static int is_atomic_lock(int *p)
{
	return p >= &atomic_locks[0] && p < &atomic_locks[ATOMIC_HASH_SIZE];
}
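
/*
 * Added note (not in the original file): presumably called from the fault
 * handling path to release a hashed lock that was held (value 1) when a
 * fault was taken, before the fault itself is processed.
 */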
void __atomic_fault_unlock(int *irqlock_word)
{
	BUG_ON(!is_atomic_lock(irqlock_word));
	BUG_ON(*irqlock_word != 1);
	*irqlock_word = 0;
}
#endif /* CONFIG_SMP */

static inline int *__atomic_setup(volatile void *v)
{
	/* Issue a load to the target to bring it into cache. */
	*(volatile int *)v;
	return __atomic_hashed_lock(v);
}

int _atomic_xchg(int *v, int n)
{
	return __atomic32_xchg(v, __atomic_setup(v), n).val;
}
EXPORT_SYMBOL(_atomic_xchg);

int _atomic_xchg_add(int *v, int i)
{
	return __atomic32_xchg_add(v, __atomic_setup(v), i).val;
}
EXPORT_SYMBOL(_atomic_xchg_add);

int _atomic_xchg_add_unless(int *v, int a, int u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic32_xchg_add_unless(v, __atomic_setup(v), u, a).val;
}
EXPORT_SYMBOL(_atomic_xchg_add_unless);

int _atomic_cmpxchg(int *v, int o, int n)
{
	return __atomic32_cmpxchg(v, __atomic_setup(v), o, n).val;
}
EXPORT_SYMBOL(_atomic_cmpxchg);
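
/*
 * Usage sketch (an added note, not part of the original file): generic
 * kernel code such as
 *
 *	atomic_t refcount = ATOMIC_INIT(1);
 *	...
 *	if (atomic_add_unless(&refcount, 1, 0))
 *		...
 *
 * reaches these out-of-line helpers through <asm/atomic_32.h>
 * (_atomic_xchg_add_unless() in this case), which take the hashed lock
 * for the target word before performing the read-modify-write.
 */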

unsigned long _atomic_fetch_or(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_or((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_or);

unsigned long _atomic_fetch_and(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_and((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_and);

unsigned long _atomic_fetch_andn(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_andn((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_andn);

unsigned long _atomic_fetch_xor(volatile unsigned long *p, unsigned long mask)
{
	return __atomic32_fetch_xor((int *)p, __atomic_setup(p), mask).val;
}
EXPORT_SYMBOL(_atomic_fetch_xor);
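
/*
 * Added note (not in the original file): the unsigned long variants above
 * also back the architecture's atomic bitops (set_bit() and friends),
 * which is why they take a plain word pointer and a mask rather than an
 * atomic_t.
 */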

long long _atomic64_xchg(long long *v, long long n)
{
	return __atomic64_xchg(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_xchg);

long long _atomic64_xchg_add(long long *v, long long i)
{
	return __atomic64_xchg_add(v, __atomic_setup(v), i);
}
EXPORT_SYMBOL(_atomic64_xchg_add);

long long _atomic64_xchg_add_unless(long long *v, long long a, long long u)
{
	/*
	 * Note: argument order is switched here since it is easier
	 * to use the first argument consistently as the "old value"
	 * in the assembly, as is done for _atomic_cmpxchg().
	 */
	return __atomic64_xchg_add_unless(v, __atomic_setup(v), u, a);
}
EXPORT_SYMBOL(_atomic64_xchg_add_unless);

long long _atomic64_cmpxchg(long long *v, long long o, long long n)
{
	return __atomic64_cmpxchg(v, __atomic_setup(v), o, n);
}
EXPORT_SYMBOL(_atomic64_cmpxchg);

long long _atomic64_fetch_and(long long *v, long long n)
{
	return __atomic64_fetch_and(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_and);

long long _atomic64_fetch_or(long long *v, long long n)
{
	return __atomic64_fetch_or(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_or);

long long _atomic64_fetch_xor(long long *v, long long n)
{
	return __atomic64_fetch_xor(v, __atomic_setup(v), n);
}
EXPORT_SYMBOL(_atomic64_fetch_xor);

/*
 * If any of the atomic or futex routines hit a bad address (not in
 * the page tables at kernel PL) this routine is called.  The futex
 * routines are never used on kernel space, and the normal atomics and
 * bitops are never used on user space.  So a fault on kernel space
 * must be fatal, but a fault on userspace is a futex fault and we
 * need to return -EFAULT.  Note that the context this routine is
 * invoked in is the context of the "_atomic_xxx()" routines called
 * by the functions in this file.
 */
struct __get_user __atomic_bad_address(int __user *addr)
{
	if (unlikely(!access_ok(VERIFY_WRITE, addr, sizeof(int))))
		panic("Bad address used for kernel atomic op: %p\n", addr);
	return (struct __get_user) { .err = -EFAULT };
}
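
/*
 * Added note (not in the original file): struct __get_user pairs a value
 * with an error code, so returning { .err = -EFAULT } lets the futex path
 * report -EFAULT to user space, while a bad kernel-space address panics
 * above instead.
 */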

void __init __init_atomic_per_cpu(void)
{
	/* Validate power-of-two and "bigger than cpus" assumption */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE & (ATOMIC_HASH_SIZE - 1));
	BUG_ON(ATOMIC_HASH_SIZE < nr_cpu_ids);

	/*
	 * On TILEPro we prefer to use a single hash-for-home
	 * page, since this means atomic operations are less
	 * likely to encounter a TLB fault and thus should
	 * in general perform faster.  You may wish to disable
	 * this in situations where few hash-for-home tiles
	 * are configured.
	 */
	BUG_ON((unsigned long)atomic_locks % PAGE_SIZE != 0);

	/* The locks must all fit on one page. */
	BUILD_BUG_ON(ATOMIC_HASH_SIZE * sizeof(int) > PAGE_SIZE);

	/*
	 * We use the page offset of the atomic value's address as
	 * an index into atomic_locks, excluding the low 3 bits.
	 * That should not produce more indices than ATOMIC_HASH_SIZE.
	 */
	BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
}
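
/*
 * Worked example (an added note; assumes the usual 64KB TILEPro page size
 * and ATOMIC_HASH_SHIFT == PAGE_SHIFT - 3): atomic_locks then holds
 * PAGE_SIZE / sizeof(int) == 16384 words and ATOMIC_HASH_SIZE is 8192, so
 * the checks above reduce to 8192 being a power of two, at least
 * nr_cpu_ids, at most a page's worth of ints, and at least PAGE_SIZE >> 3,
 * all of which hold.
 */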