lock_sema.go

// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build darwin nacl netbsd openbsd plan9 solaris windows

package runtime

import "unsafe"

// This implementation depends on OS-specific implementations of
//
//	uintptr runtime·semacreate(void)
//		Create a semaphore, which will be assigned to m->waitsema.
//		The zero value is treated as absence of any semaphore,
//		so be sure to return a non-zero value.
//
//	int32 runtime·semasleep(int64 ns)
//		If ns < 0, acquire m->waitsema and return 0.
//		If ns >= 0, try to acquire m->waitsema for at most ns nanoseconds.
//		Return 0 if the semaphore was acquired, -1 if interrupted or timed out.
//
//	int32 runtime·semawakeup(M *mp)
//		Wake up mp, which is or will soon be sleeping on mp->waitsema.
//
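
// As a rough illustration only (an assumption, not part of this file):
// on a POSIX-like system these primitives could be backed by a counting
// semaphore, with semacreate allocating one, semasleep doing a plain or
// timed wait, and semawakeup posting it. The real implementations are
// provided per OS by the platforms named in the build tag above.
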
const (
	locked uintptr = 1

	active_spin     = 4
	active_spin_cnt = 30
	passive_spin    = 1
)

func semacreate() uintptr
func semasleep(int64) int32
func semawakeup(mp *m)

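// The low bit of a mutex key is the locked flag; the remaining bits hold
// the head of the list of M's waiting for the lock, chained through
// m.nextwaitm (see lock and unlock below).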
func lock(l *mutex) {
	gp := getg()

	if gp.m.locks < 0 {
		gothrow("runtime·lock: lock count")
	}
	gp.m.locks++

	// Speculative grab for lock.
	if casuintptr(&l.key, 0, locked) {
		return
	}

	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}

	// On uniprocessors, no point spinning.
	// On multiprocessors, spin for active_spin attempts.
	spin := 0
	if ncpu > 1 {
		spin = active_spin
	}
Loop:
	for i := 0; ; i++ {
		v := atomicloaduintptr(&l.key)
		if v&locked == 0 {
			// Unlocked. Try to lock.
			if casuintptr(&l.key, v, v|locked) {
				return
			}
			i = 0
		}
		if i < spin {
			procyield(active_spin_cnt)
		} else if i < spin+passive_spin {
			osyield()
		} else {
			// Someone else has it.
			// l.key holds a linked list of M's waiting for this lock,
			// chained through m.nextwaitm. Queue this M.
			for {
				gp.m.nextwaitm = (*m)(unsafe.Pointer(v &^ locked))
				if casuintptr(&l.key, v, uintptr(unsafe.Pointer(gp.m))|locked) {
					break
				}
				v = atomicloaduintptr(&l.key)
				if v&locked == 0 {
					continue Loop
				}
			}
			if v&locked != 0 {
				// Queued. Wait.
				semasleep(-1)
				i = 0
			}
		}
	}
}

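// unlock releases l. If other M's are queued waiting for the lock, it
// dequeues one and wakes it with semawakeup.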
func unlock(l *mutex) {
	gp := getg()
	var mp *m
	for {
		v := atomicloaduintptr(&l.key)
		if v == locked {
			if casuintptr(&l.key, locked, 0) {
				break
			}
		} else {
			// Other M's are waiting for the lock.
			// Dequeue an M.
			mp = (*m)(unsafe.Pointer(v &^ locked))
			if casuintptr(&l.key, v, uintptr(unsafe.Pointer(mp.nextwaitm))) {
				// Dequeued an M. Wake it.
				semawakeup(mp)
				break
			}
		}
	}

	gp.m.locks--
	if gp.m.locks < 0 {
		gothrow("runtime·unlock: lock count")
	}
	if gp.m.locks == 0 && gp.preempt {
		// Restore the preemption request in case we've cleared it in newstack.
		gp.stackguard0 = stackPreempt
	}
}

// One-time notifications.
func noteclear(n *note) {
	n.key = 0
}

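// A note's key encodes its state: 0 means no wakeup and no waiter yet,
// locked (1) means notewakeup has run, and any other value is the M
// registered by notesleep/notetsleep that is waiting for the wakeup.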
func notewakeup(n *note) {
	var v uintptr
	for {
		v = atomicloaduintptr(&n.key)
		if casuintptr(&n.key, v, locked) {
			break
		}
	}

	// Successfully set n.key to locked.
	// What was it before?
	switch {
	case v == 0:
		// Nothing was waiting. Done.
	case v == locked:
		// Two notewakeups! Not allowed.
		gothrow("notewakeup - double wakeup")
	default:
		// Must be the waiting m. Wake it up.
		semawakeup((*m)(unsafe.Pointer(v)))
	}
}

func notesleep(n *note) {
	gp := getg()
	if gp != gp.m.g0 {
		gothrow("notesleep not on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			gothrow("notesleep - waitm out of sync")
		}
		return
	}

	// Queued. Sleep.
	gp.m.blocked = true
	semasleep(-1)
	gp.m.blocked = false
}

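// notetsleep_internal waits for a wakeup on n. It returns true once the
// wakeup arrives (always, when ns < 0, since it then sleeps until woken)
// and false if ns nanoseconds pass first.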
//go:nosplit
func notetsleep_internal(n *note, ns int64, gp *g, deadline int64) bool {
	// gp and deadline are logically local variables, but they are written
	// as parameters so that the stack space they require is charged
	// to the caller.
	// This reduces the nosplit footprint of notetsleep_internal.
	gp = getg()

	// Register for wakeup on n.key.
	if !casuintptr(&n.key, 0, uintptr(unsafe.Pointer(gp.m))) {
		// Must be locked (got wakeup).
		if n.key != locked {
			gothrow("notetsleep - waitm out of sync")
		}
		return true
	}

	if ns < 0 {
		// Queued. Sleep.
		gp.m.blocked = true
		semasleep(-1)
		gp.m.blocked = false
		return true
	}

	deadline = nanotime() + ns
	for {
		// Registered. Sleep.
		gp.m.blocked = true
		if semasleep(ns) >= 0 {
			gp.m.blocked = false
			// Acquired semaphore, semawakeup unregistered us.
			// Done.
			return true
		}
		gp.m.blocked = false

		// Interrupted or timed out. Still registered. Semaphore not acquired.
		ns = deadline - nanotime()
		if ns <= 0 {
			break
		}
		// Deadline hasn't arrived. Keep sleeping.
	}

	// Deadline arrived. Still registered. Semaphore not acquired.
	// Want to give up and return, but have to unregister first,
	// so that any notewakeup racing with the return does not
	// try to grant us the semaphore when we don't expect it.
	for {
		v := atomicloaduintptr(&n.key)
		switch v {
		case uintptr(unsafe.Pointer(gp.m)):
			// No wakeup yet; unregister if possible.
			if casuintptr(&n.key, v, 0) {
				return false
			}
		case locked:
			// Wakeup happened, so the semaphore is available.
			// Grab it to avoid getting out of sync.
			gp.m.blocked = true
			if semasleep(-1) < 0 {
				gothrow("runtime: unable to acquire - semaphore out of sync")
			}
			gp.m.blocked = false
			return true
		default:
			gothrow("runtime: unexpected waitm - semaphore out of sync")
		}
	}
}

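// notetsleep waits for a wakeup on n for at most ns nanoseconds
// (forever if ns < 0) and reports whether the wakeup arrived.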
func notetsleep(n *note, ns int64) bool {
	gp := getg()
	if gp != gp.m.g0 && gp.m.gcing == 0 {
		gothrow("notetsleep not on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}
	return notetsleep_internal(n, ns, nil, 0)
}

// notetsleepg is the same as notetsleep, but is called on a user g
// (not g0); it calls only nosplit functions between
// entersyscallblock/exitsyscall.
func notetsleepg(n *note, ns int64) bool {
	gp := getg()
	if gp == gp.m.g0 {
		gothrow("notetsleepg on g0")
	}
	if gp.m.waitsema == 0 {
		gp.m.waitsema = semacreate()
	}

	entersyscallblock()
	ok := notetsleep_internal(n, ns, nil, 0)
	exitsyscall()
	return ok
}