  1. // Copyright 2009 The Go Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. // GOMAXPROCS=10 go test
  5. package sync_test
  6. import (
  7. "fmt"
  8. "runtime"
  9. . "sync"
  10. "sync/atomic"
  11. "testing"
  12. )
  13. func parallelReader(m *RWMutex, clocked, cunlock, cdone chan bool) {
  14. m.RLock()
  15. clocked <- true
  16. <-cunlock
  17. m.RUnlock()
  18. cdone <- true
  19. }
  20. func doTestParallelReaders(numReaders, gomaxprocs int) {
  21. runtime.GOMAXPROCS(gomaxprocs)
  22. var m RWMutex
  23. clocked := make(chan bool)
  24. cunlock := make(chan bool)
  25. cdone := make(chan bool)
  26. for i := 0; i < numReaders; i++ {
  27. go parallelReader(&m, clocked, cunlock, cdone)
  28. }
  29. // Wait for all parallel RLock()s to succeed.
  30. for i := 0; i < numReaders; i++ {
  31. <-clocked
  32. }
  33. for i := 0; i < numReaders; i++ {
  34. cunlock <- true
  35. }
  36. // Wait for the goroutines to finish.
  37. for i := 0; i < numReaders; i++ {
  38. <-cdone
  39. }
  40. }
  41. func TestParallelReaders(t *testing.T) {
  42. defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
  43. doTestParallelReaders(1, 4)
  44. doTestParallelReaders(3, 4)
  45. doTestParallelReaders(4, 2)
  46. }
  47. func reader(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
  48. for i := 0; i < num_iterations; i++ {
  49. rwm.RLock()
  50. n := atomic.AddInt32(activity, 1)
  51. if n < 1 || n >= 10000 {
  52. panic(fmt.Sprintf("wlock(%d)\n", n))
  53. }
  54. for i := 0; i < 100; i++ {
  55. }
  56. atomic.AddInt32(activity, -1)
  57. rwm.RUnlock()
  58. }
  59. cdone <- true
  60. }
  61. func writer(rwm *RWMutex, num_iterations int, activity *int32, cdone chan bool) {
  62. for i := 0; i < num_iterations; i++ {
  63. rwm.Lock()
  64. n := atomic.AddInt32(activity, 10000)
  65. if n != 10000 {
  66. panic(fmt.Sprintf("wlock(%d)\n", n))
  67. }
  68. for i := 0; i < 100; i++ {
  69. }
  70. atomic.AddInt32(activity, -10000)
  71. rwm.Unlock()
  72. }
  73. cdone <- true
  74. }
  75. func HammerRWMutex(gomaxprocs, numReaders, num_iterations int) {
  76. runtime.GOMAXPROCS(gomaxprocs)
  77. // Number of active readers + 10000 * number of active writers.
  78. var activity int32
  79. var rwm RWMutex
  80. cdone := make(chan bool)
  81. go writer(&rwm, num_iterations, &activity, cdone)
  82. var i int
  83. for i = 0; i < numReaders/2; i++ {
  84. go reader(&rwm, num_iterations, &activity, cdone)
  85. }
  86. go writer(&rwm, num_iterations, &activity, cdone)
  87. for ; i < numReaders; i++ {
  88. go reader(&rwm, num_iterations, &activity, cdone)
  89. }
  90. // Wait for the 2 writers and all readers to finish.
  91. for i := 0; i < 2+numReaders; i++ {
  92. <-cdone
  93. }
  94. }
  95. func TestRWMutex(t *testing.T) {
  96. defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(-1))
  97. n := 1000
  98. if testing.Short() {
  99. n = 5
  100. }
  101. HammerRWMutex(1, 1, n)
  102. HammerRWMutex(1, 3, n)
  103. HammerRWMutex(1, 10, n)
  104. HammerRWMutex(4, 1, n)
  105. HammerRWMutex(4, 3, n)
  106. HammerRWMutex(4, 10, n)
  107. HammerRWMutex(10, 1, n)
  108. HammerRWMutex(10, 3, n)
  109. HammerRWMutex(10, 10, n)
  110. HammerRWMutex(10, 5, n)
  111. }
  112. func TestRLocker(t *testing.T) {
  113. var wl RWMutex
  114. var rl Locker
  115. wlocked := make(chan bool, 1)
  116. rlocked := make(chan bool, 1)
  117. rl = wl.RLocker()
  118. n := 10
  119. go func() {
  120. for i := 0; i < n; i++ {
  121. rl.Lock()
  122. rl.Lock()
  123. rlocked <- true
  124. wl.Lock()
  125. wlocked <- true
  126. }
  127. }()
  128. for i := 0; i < n; i++ {
  129. <-rlocked
  130. rl.Unlock()
  131. select {
  132. case <-wlocked:
  133. t.Fatal("RLocker() didn't read-lock it")
  134. default:
  135. }
  136. rl.Unlock()
  137. <-wlocked
  138. select {
  139. case <-rlocked:
  140. t.Fatal("RLocker() didn't respect the write lock")
  141. default:
  142. }
  143. wl.Unlock()
  144. }
  145. }
  146. func TestUnlockPanic(t *testing.T) {
  147. defer func() {
  148. if recover() == nil {
  149. t.Fatalf("unlock of unlocked RWMutex did not panic")
  150. }
  151. }()
  152. var mu RWMutex
  153. mu.Unlock()
  154. }
  155. func TestUnlockPanic2(t *testing.T) {
  156. defer func() {
  157. if recover() == nil {
  158. t.Fatalf("unlock of unlocked RWMutex did not panic")
  159. }
  160. }()
  161. var mu RWMutex
  162. mu.RLock()
  163. mu.Unlock()
  164. }
  165. func TestRUnlockPanic(t *testing.T) {
  166. defer func() {
  167. if recover() == nil {
  168. t.Fatalf("read unlock of unlocked RWMutex did not panic")
  169. }
  170. }()
  171. var mu RWMutex
  172. mu.RUnlock()
  173. }
  174. func TestRUnlockPanic2(t *testing.T) {
  175. defer func() {
  176. if recover() == nil {
  177. t.Fatalf("read unlock of unlocked RWMutex did not panic")
  178. }
  179. }()
  180. var mu RWMutex
  181. mu.Lock()
  182. mu.RUnlock()
  183. }
  184. func BenchmarkRWMutexUncontended(b *testing.B) {
  185. type PaddedRWMutex struct {
  186. RWMutex
  187. pad [32]uint32
  188. }
  189. b.RunParallel(func(pb *testing.PB) {
  190. var rwm PaddedRWMutex
  191. for pb.Next() {
  192. rwm.RLock()
  193. rwm.RLock()
  194. rwm.RUnlock()
  195. rwm.RUnlock()
  196. rwm.Lock()
  197. rwm.Unlock()
  198. }
  199. })
  200. }
  201. func benchmarkRWMutex(b *testing.B, localWork, writeRatio int) {
  202. var rwm RWMutex
  203. b.RunParallel(func(pb *testing.PB) {
  204. foo := 0
  205. for pb.Next() {
  206. foo++
  207. if foo%writeRatio == 0 {
  208. rwm.Lock()
  209. rwm.Unlock()
  210. } else {
  211. rwm.RLock()
  212. for i := 0; i != localWork; i += 1 {
  213. foo *= 2
  214. foo /= 2
  215. }
  216. rwm.RUnlock()
  217. }
  218. }
  219. _ = foo
  220. })
  221. }
  222. func BenchmarkRWMutexWrite100(b *testing.B) {
  223. benchmarkRWMutex(b, 0, 100)
  224. }
  225. func BenchmarkRWMutexWrite10(b *testing.B) {
  226. benchmarkRWMutex(b, 0, 10)
  227. }
  228. func BenchmarkRWMutexWorkWrite100(b *testing.B) {
  229. benchmarkRWMutex(b, 100, 100)
  230. }
  231. func BenchmarkRWMutexWorkWrite10(b *testing.B) {
  232. benchmarkRWMutex(b, 100, 10)
  233. }