// pool.go
  1. // Copyright 2013 The Go Authors. All rights reserved.
  2. // Use of this source code is governed by a BSD-style
  3. // license that can be found in the LICENSE file.
  4. package sync
  5. import (
  6. "runtime"
  7. "sync/atomic"
  8. "unsafe"
  9. )
  10. // A Pool is a set of temporary objects that may be individually saved and
  11. // retrieved.
  12. //
  13. // Any item stored in the Pool may be removed automatically at any time without
  14. // notification. If the Pool holds the only reference when this happens, the
  15. // item might be deallocated.
  16. //
  17. // A Pool is safe for use by multiple goroutines simultaneously.
  18. //
  19. // Pool's purpose is to cache allocated but unused items for later reuse,
  20. // relieving pressure on the garbage collector. That is, it makes it easy to
  21. // build efficient, thread-safe free lists. However, it is not suitable for all
  22. // free lists.
  23. //
  24. // An appropriate use of a Pool is to manage a group of temporary items
  25. // silently shared among and potentially reused by concurrent independent
  26. // clients of a package. Pool provides a way to amortize allocation overhead
  27. // across many clients.
  28. //
  29. // An example of good use of a Pool is in the fmt package, which maintains a
  30. // dynamically-sized store of temporary output buffers. The store scales under
  31. // load (when many goroutines are actively printing) and shrinks when
  32. // quiescent.
  33. //
  34. // On the other hand, a free list maintained as part of a short-lived object is
  35. // not a suitable use for a Pool, since the overhead does not amortize well in
  36. // that scenario. It is more efficient to have such objects implement their own
  37. // free list.
  38. //
type Pool struct {
	// local points to a fixed-size per-P array; actual type is [P]poolLocal.
	// It is published with an atomic store in pinSlow and read with a
	// plain (consume) load in pin/getSlow.
	local unsafe.Pointer
	// localSize is the length of the local array. It is stored after
	// local (store-release) so a reader that observes it also observes
	// a local array at least that large.
	localSize uintptr

	// New optionally specifies a function to generate
	// a value when Get would otherwise return nil.
	// It may not be changed concurrently with calls to Get.
	New func() interface{}
}
// Local per-P Pool appendix.
type poolLocal struct {
	private interface{}   // Can be used only by the respective P (accessed only while pinned).
	shared  []interface{} // Can be used by any P; treated as a stack (append/pop at the tail).
	Mutex                 // Protects shared.
	pad     [128]byte     // Prevents false sharing between adjacent poolLocals in the array.
}
  54. // Put adds x to the pool.
  55. func (p *Pool) Put(x interface{}) {
  56. if raceenabled {
  57. // Under race detector the Pool degenerates into no-op.
  58. // It's conforming, simple and does not introduce excessive
  59. // happens-before edges between unrelated goroutines.
  60. return
  61. }
  62. if x == nil {
  63. return
  64. }
  65. l := p.pin()
  66. if l.private == nil {
  67. l.private = x
  68. x = nil
  69. }
  70. runtime_procUnpin()
  71. if x == nil {
  72. return
  73. }
  74. l.Lock()
  75. l.shared = append(l.shared, x)
  76. l.Unlock()
  77. }
  78. // Get selects an arbitrary item from the Pool, removes it from the
  79. // Pool, and returns it to the caller.
  80. // Get may choose to ignore the pool and treat it as empty.
  81. // Callers should not assume any relation between values passed to Put and
  82. // the values returned by Get.
  83. //
  84. // If Get would otherwise return nil and p.New is non-nil, Get returns
  85. // the result of calling p.New.
  86. func (p *Pool) Get() interface{} {
  87. if raceenabled {
  88. if p.New != nil {
  89. return p.New()
  90. }
  91. return nil
  92. }
  93. l := p.pin()
  94. x := l.private
  95. l.private = nil
  96. runtime_procUnpin()
  97. if x != nil {
  98. return x
  99. }
  100. l.Lock()
  101. last := len(l.shared) - 1
  102. if last >= 0 {
  103. x = l.shared[last]
  104. l.shared = l.shared[:last]
  105. }
  106. l.Unlock()
  107. if x != nil {
  108. return x
  109. }
  110. return p.getSlow()
  111. }
// getSlow is Get's slow path: it tries to steal one element from the
// shared list of some other P and, failing that, falls back to p.New.
func (p *Pool) getSlow() (x interface{}) {
	// See the comment in pin regarding ordering of the loads.
	size := atomic.LoadUintptr(&p.localSize) // load-acquire
	local := p.local                         // load-consume
	// Try to steal one element from other procs.
	// pid is only a starting hint: we unpin immediately, so by the time
	// we index below we may be running on a different P. That only
	// affects which victim is tried first, not correctness.
	pid := runtime_procPin()
	runtime_procUnpin()
	for i := 0; i < int(size); i++ {
		// Start at the neighboring P and wrap around the whole array
		// (our own poolLocal is revisited last; Get already found its
		// private slot and shared tail empty, but it may have been refilled).
		l := indexLocal(local, (pid+i+1)%int(size))
		l.Lock()
		last := len(l.shared) - 1
		if last >= 0 {
			// Pop from the victim's tail.
			x = l.shared[last]
			l.shared = l.shared[:last]
			l.Unlock()
			break
		}
		l.Unlock()
	}
	// Every shared list was empty; allocate a fresh value if we can.
	if x == nil && p.New != nil {
		x = p.New()
	}
	return x
}
// pin pins the current goroutine to P, disables preemption and returns poolLocal pool for the P.
// Caller must call runtime_procUnpin() when done with the pool.
func (p *Pool) pin() *poolLocal {
	pid := runtime_procPin()
	// In pinSlow we store to localSize and then to local, here we load in opposite order.
	// Since we've disabled preemption, GC can not happen in between.
	// Thus here we must observe local at least as large localSize.
	// We can observe a newer/larger local, it is fine (we must observe its zero-initialized-ness).
	s := atomic.LoadUintptr(&p.localSize) // load-acquire
	l := p.local                          // load-consume
	if uintptr(pid) < s {
		// Fast path: the per-P array already covers this P.
		return indexLocal(l, pid)
	}
	// Slow path: first use of this Pool, or GOMAXPROCS grew past the
	// array size; (re)allocate under the global mutex.
	return p.pinSlow()
}
// pinSlow is pin's slow path: it (re)allocates the per-P poolLocal array
// under allPoolsMu and returns this P's poolLocal. On return the
// goroutine is pinned (caller must still runtime_procUnpin).
func (p *Pool) pinSlow() *poolLocal {
	// Retry under the mutex.
	// Can not lock the mutex while pinned.
	runtime_procUnpin()
	allPoolsMu.Lock()
	defer allPoolsMu.Unlock()
	pid := runtime_procPin()
	// poolCleanup won't be called while we are pinned.
	s := p.localSize
	l := p.local
	if uintptr(pid) < s {
		// Another goroutine grew the array while we were unpinned.
		return indexLocal(l, pid)
	}
	if p.local == nil {
		// First use since the last cleanup: register for poolCleanup.
		allPools = append(allPools, p)
	}
	// If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one.
	size := runtime.GOMAXPROCS(0)
	local := make([]poolLocal, size)
	// Publish the array before its size (mirror image of pin's loads),
	// so no reader can observe a size larger than the array it sees.
	atomic.StorePointer((*unsafe.Pointer)(&p.local), unsafe.Pointer(&local[0])) // store-release
	atomic.StoreUintptr(&p.localSize, uintptr(size))                            // store-release
	return &local[pid]
}
// poolCleanup empties every registered Pool. It is handed to the runtime
// in init and is called with the world stopped, at the beginning of a
// garbage collection.
// It must not allocate and probably should not call any runtime functions.
// Defensively zero out everything, 2 reasons:
//  1. To prevent false retention of whole Pools.
//  2. If GC happens while a goroutine works with l.shared in Put/Get,
//     it will retain whole Pool. So next cycle memory consumption would be doubled.
func poolCleanup() {
	for i, p := range allPools {
		allPools[i] = nil // drop the reference even if the loop below panics
		for i := 0; i < int(p.localSize); i++ {
			l := indexLocal(p.local, i)
			l.private = nil
			// Nil out each element, not just the slice header, in case
			// a goroutine still holds an alias of the backing array.
			for j := range l.shared {
				l.shared[j] = nil
			}
			l.shared = nil
		}
		p.local = nil
		p.localSize = 0
	}
	allPools = []*Pool{}
}
var (
	allPoolsMu Mutex   // guards mutation of allPools (see pinSlow)
	allPools   []*Pool // every Pool with a non-nil local array; emptied by poolCleanup
)
// init hooks poolCleanup into the runtime so pools are drained at the
// start of every garbage collection.
func init() {
	runtime_registerPoolCleanup(poolCleanup)
}
// indexLocal returns a pointer to the i'th poolLocal in the array at l.
// The huge array type is never allocated; the cast merely gives the
// compiler a typed view of the memory so normal indexing applies.
// Callers guarantee i < localSize, so the access stays in bounds.
func indexLocal(l unsafe.Pointer, i int) *poolLocal {
	return &(*[1000000]poolLocal)(l)[i]
}
// Implemented in runtime.

// runtime_registerPoolCleanup registers cleanup to be called by the
// runtime at the start of each GC (see poolCleanup).
func runtime_registerPoolCleanup(cleanup func())

// runtime_procPin pins the calling goroutine to its current P,
// disabling preemption, and returns the P's id.
func runtime_procPin() int

// runtime_procUnpin undoes a previous runtime_procPin.
func runtime_procUnpin()