- // Copyright 2014 The Go Authors. All rights reserved.
- // Use of this source code is governed by a BSD-style
- // license that can be found in the LICENSE file.
- package runtime
- // This file contains the implementation of Go's map type.
- //
- // A map is just a hash table. The data is arranged
- // into an array of buckets. Each bucket contains up to
- // 8 key/value pairs. The low-order bits of the hash are
- // used to select a bucket. Each bucket contains a few
- // high-order bits of each hash to distinguish the entries
- // within a single bucket.
- //
- // If more than 8 keys hash to a bucket, we chain on
- // extra buckets.
- //
- // When the hashtable grows, we allocate a new array
- // of buckets twice as big. Buckets are incrementally
- // copied from the old bucket array to the new bucket array.
- //
- // Map iterators walk through the array of buckets and
- // return the keys in walk order (bucket #, then overflow
- // chain order, then bucket index). To maintain iteration
- // semantics, we never move keys within their bucket (if
- // we did, keys might be returned 0 or 2 times). When
- // growing the table, iterators remain iterating through the
- // old table and must check the new table if the bucket
- // they are iterating through has been moved ("evacuated")
- // to the new table.
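- //
- // As a rough sketch of the split described above (illustrative only, not part
- // of the original comment; splitHash is a hypothetical name, while minTopHash
- // and ptrSize are the runtime constants used later in this file):
- //
- //	func splitHash(hash uintptr, B uint8) (bucket uintptr, top uint8) {
- //		bucket = hash & (uintptr(1)<<B - 1)  // low-order B bits pick the bucket
- //		top = uint8(hash >> (ptrSize*8 - 8)) // high-order byte distinguishes entries
- //		if top < minTopHash {
- //			top += minTopHash // keep clear of the reserved marker values below
- //		}
- //		return
- //	}
- //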
- // Picking loadFactor: too large and we have lots of overflow
- // buckets, too small and we waste a lot of space. I wrote
- // a simple program to check some stats for different loads:
- // (64-bit, 8 byte keys and values)
- //  loadFactor  %overflow  bytes/entry  hitprobe  missprobe
- //        4.00       2.13        20.77      3.00       4.00
- //        4.50       4.05        17.30      3.25       4.50
- //        5.00       6.85        14.77      3.50       5.00
- //        5.50      10.55        12.94      3.75       5.50
- //        6.00      15.27        11.67      4.00       6.00
- //        6.50      20.90        10.79      4.25       6.50
- //        7.00      27.14        10.15      4.50       7.00
- //        7.50      34.03         9.73      4.75       7.50
- //        8.00      41.10         9.40      5.00       8.00
- //
- // %overflow = percentage of buckets which have an overflow bucket
- // bytes/entry = overhead bytes used per key/value pair
- // hitprobe = # of entries to check when looking up a present key
- // missprobe = # of entries to check when looking up an absent key
- //
- // Keep in mind this data is for maximally loaded tables, i.e. just
- // before the table grows. Typical tables will be somewhat less loaded.
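- //
- // A quick worked example (not from the original comment): with loadFactor =
- // 6.5 and B = 5 there are 2^5 = 32 buckets, so the insert path below doubles
- // the table once a new key arrives and the count has already reached
- // 6.5 * 32 = 208, i.e. when
- //
- //	float32(h.count) >= loadFactor*float32(uintptr(1)<<h.B)
- //
- // holds (and the map no longer fits in a single bucket of bucketCnt entries).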
- import (
- "unsafe"
- )
- const (
- // Maximum number of key/value pairs a bucket can hold.
- bucketCntBits = 3
- bucketCnt = 1 << bucketCntBits
- // Maximum average load of a bucket that triggers growth.
- loadFactor = 6.5
- // Maximum key or value size to keep inline (instead of mallocing per element).
- // Must fit in a uint8.
- // Fast versions cannot handle big values - the cutoff size for
- // fast versions in ../../cmd/gc/walk.c must be at most this value.
- maxKeySize = 128
- maxValueSize = 128
- // data offset should be the size of the bmap struct, but needs to be
- // aligned correctly. For amd64p32 this means 64-bit alignment
- // even though pointers are 32 bit.
- dataOffset = unsafe.Offsetof(struct {
- b bmap
- v int64
- }{}.v)
- // Possible tophash values. We reserve a few possibilities for special marks.
- // Each bucket (including its overflow buckets, if any) will have either all or none of its
- // entries in the evacuated* states (except during the evacuate() method, which only happens
- // during map writes and thus no one else can observe the map during that time).
- empty = 0 // cell is empty
- evacuatedEmpty = 1 // cell is empty, bucket is evacuated.
- evacuatedX = 2 // key/value is valid. Entry has been evacuated to first half of larger table.
- evacuatedY = 3 // same as above, but evacuated to second half of larger table.
- minTopHash = 4 // minimum tophash for a normal filled cell.
- // flags
- iterator = 1 // there may be an iterator using buckets
- oldIterator = 2 // there may be an iterator using oldbuckets
- // sentinel bucket ID for iterator checks
- noCheck = 1<<(8*ptrSize) - 1
- // trigger a garbage collection at every alloc called from this code
- checkgc = false
- )
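- // Illustrative sketch (not part of the original file): the offset arithmetic
- // used throughout this file when touching cell i of a bucket. exampleKeyOffset
- // and exampleValueOffset are hypothetical helpers; t.keysize and t.valuesize
- // are the per-cell sizes recorded in the maptype.
- func exampleKeyOffset(t *maptype, i uintptr) uintptr {
- 	// tophash array (padded out to dataOffset), then the i'th key.
- 	return dataOffset + i*uintptr(t.keysize)
- }
- func exampleValueOffset(t *maptype, i uintptr) uintptr {
- 	// all bucketCnt keys come first, then the i'th value.
- 	return dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize)
- }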
- // A header for a Go map.
- type hmap struct {
- // Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
- // ../reflect/type.go. Don't change this structure without also changing that code!
- count int // # live cells == size of map. Must be first (used by len() builtin)
- flags uint32
- hash0 uint32 // hash seed
- B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
- buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
- oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
- nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
- }
- // A bucket for a Go map.
- type bmap struct {
- tophash [bucketCnt]uint8
- // Followed by bucketCnt keys and then bucketCnt values.
- // NOTE: packing all the keys together and then all the values together makes the
- // code a bit more complicated than alternating key/value/key/value/... but it allows
- // us to eliminate padding which would be needed for, e.g., map[int64]int8.
- // Followed by an overflow pointer.
- }
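- // Illustrative sketch (not part of the original file): why keys and values are
- // grouped rather than interleaved. For map[int64]int8 on a 64-bit platform an
- // interleaved pair is padded out to 16 bytes, while grouping the keys and then
- // the values needs no padding at all. examplePaddedPair and exampleGrouped are
- // hypothetical types that only exist to make the sizes visible.
- type examplePaddedPair struct {
- 	k int64
- 	v int8
- 	// 7 bytes of padding follow so that the next pair's k stays 8-byte aligned.
- }
- type exampleGrouped struct {
- 	k [bucketCnt]int64
- 	v [bucketCnt]int8
- }
- // unsafe.Sizeof(examplePaddedPair{}) is 16, so eight interleaved pairs cost 128
- // bytes; unsafe.Sizeof(exampleGrouped{}) is 72, the same data with no padding
- // between entries.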
- // A hash iteration structure.
- // If you modify hiter, also change cmd/gc/reflect.c to indicate
- // the layout of this structure.
- type hiter struct {
- key unsafe.Pointer // Must be in first position. Write nil to indicate iteration end (see cmd/gc/range.c).
- value unsafe.Pointer // Must be in second position (see cmd/gc/range.c).
- t *maptype
- h *hmap
- buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
- bptr *bmap // current bucket
- startBucket uintptr // bucket iteration started at
- offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
- wrapped bool // already wrapped around from end of bucket array to beginning
- B uint8
- i uint8
- bucket uintptr
- checkBucket uintptr
- }
- func evacuated(b *bmap) bool {
- h := b.tophash[0]
- return h > empty && h < minTopHash
- }
- func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
- }
- func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
- }
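- // Illustrative sketch (not part of the original file): overflow and
- // setoverflow above work because the compiler lays out every bucket as the
- // bmap header, the keys, the values, and finally one pointer-sized overflow
- // slot, so "end of bucket minus one register-sized word" lands on that slot.
- // exampleFixedBucket and exampleTrailingPtr are hypothetical; they show the
- // same access pattern on a statically declared bucket, using the file's add
- // helper for the pointer arithmetic.
- type exampleFixedBucket struct {
- 	tophash  [bucketCnt]uint8
- 	keys     [bucketCnt]uint64
- 	values   [bucketCnt]uint64
- 	overflow *exampleFixedBucket
- }
- func exampleTrailingPtr(b *exampleFixedBucket) *exampleFixedBucket {
- 	// overflow(t) computes bucket start + t.bucketsize - regSize; with a
- 	// statically known layout the same slot is simply unsafe.Offsetof(b.overflow).
- 	p := add(unsafe.Pointer(b), unsafe.Offsetof(b.overflow))
- 	return *(**exampleFixedBucket)(p)
- }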
- func makemap(t *maptype, hint int64) *hmap {
- if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
- gothrow("bad hmap size")
- }
- if hint < 0 || int64(int32(hint)) != hint {
- panic("makemap: size out of range")
- // TODO: make hint an int, then none of this nonsense
- }
- if !ismapkey(t.key) {
- gothrow("runtime.makemap: unsupported map key type")
- }
- // check compiler's and reflect's math
- if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
- t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
- gothrow("key size wrong")
- }
- if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
- t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
- gothrow("value size wrong")
- }
- // invariants we depend on. We should probably check these at compile time
- // somewhere, but for now we'll do it here.
- if t.key.align > bucketCnt {
- gothrow("key align too big")
- }
- if t.elem.align > bucketCnt {
- gothrow("value align too big")
- }
- if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
- gothrow("key size not a multiple of key align")
- }
- if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
- gothrow("value size not a multiple of value align")
- }
- if bucketCnt < 8 {
- gothrow("bucketsize too small for proper alignment")
- }
- if dataOffset%uintptr(t.key.align) != 0 {
- gothrow("need padding in bucket (key)")
- }
- if dataOffset%uintptr(t.elem.align) != 0 {
- gothrow("need padding in bucket (value)")
- }
- // find size parameter which will hold the requested # of elements
- B := uint8(0)
- for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
- }
- // allocate initial hash table
- // if B == 0, the buckets field is allocated lazily later (in mapassign)
- // If hint is large zeroing this memory could take a while.
- var buckets unsafe.Pointer
- if B != 0 {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- buckets = newarray(t.bucket, uintptr(1)<<B)
- }
- // initialize Hmap
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- h := (*hmap)(newobject(t.hmap))
- h.count = 0
- h.B = B
- h.flags = 0
- h.hash0 = fastrand1()
- h.buckets = buckets
- h.oldbuckets = nil
- h.nevacuate = 0
- return h
- }
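- // Illustrative sketch (not part of the original file): the B-selection loop
- // from makemap, pulled out on its own. exampleBucketShift is a hypothetical
- // helper returning the smallest B whose 2^B buckets keep hint elements within
- // the load factor (0 when everything fits in one bucket).
- func exampleBucketShift(hint int64) uint8 {
- 	B := uint8(0)
- 	for hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B) {
- 		B++
- 	}
- 	return B
- }
- // For a hint of 1000 this settles on B = 8: 2^8 = 256 buckets hold up to
- // 6.5*256 = 1664 entries, while 2^7 = 128 buckets would cap out at 832.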
- // mapaccess1 returns a pointer to h[key]. Never returns nil, instead
- // it will return a reference to the zero object for the value type if
- // the key is not in the map.
- // NOTE: The returned pointer may keep the whole map live, so don't
- // hold onto it for very long.
- func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- pc := funcPC(mapaccess1)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero)
- }
- alg := goalg(t.key.alg)
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- if alg.equal(key, k, uintptr(t.key.size)) {
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- return v
- }
- }
- b = b.overflow(t)
- if b == nil {
- return unsafe.Pointer(t.elem.zero)
- }
- }
- }
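- // Illustrative usage sketch (not part of the original file): the two source
- // forms that reach mapaccess1 and mapaccess2. exampleLookup is hypothetical
- // and uses an array key on purpose, since 32-bit, 64-bit, and string keys are
- // typically routed to the specialized fast variants mentioned near maxKeySize
- // instead of these generic entry points.
- func exampleLookup(m map[[16]byte]int, k [16]byte) (int, bool) {
- 	v := m[k]      // one-result form: mapaccess1; an absent key yields the zero value
- 	v2, ok := m[k] // two-result form: mapaccess2; ok reports whether the key was present
- 	_ = v
- 	return v2, ok
- }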
- func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- pc := funcPC(mapaccess2)
- racereadpc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if h == nil || h.count == 0 {
- return unsafe.Pointer(t.elem.zero), false
- }
- alg := goalg(t.key.alg)
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- if alg.equal(key, k, uintptr(t.key.size)) {
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- return v, true
- }
- }
- b = b.overflow(t)
- if b == nil {
- return unsafe.Pointer(t.elem.zero), false
- }
- }
- }
- // returns both key and value. Used by map iterator
- func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
- if h == nil || h.count == 0 {
- return nil, nil
- }
- alg := goalg(t.key.alg)
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
- m := uintptr(1)<<h.B - 1
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
- if c := h.oldbuckets; c != nil {
- oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
- if !evacuated(oldb) {
- b = oldb
- }
- }
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- if alg.equal(key, k, uintptr(t.key.size)) {
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- return k, v
- }
- }
- b = b.overflow(t)
- if b == nil {
- return nil, nil
- }
- }
- }
- func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
- if h == nil {
- panic("assignment to entry in nil map")
- }
- if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- pc := funcPC(mapassign1)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- raceReadObjectPC(t.elem, val, callerpc, pc)
- }
- alg := goalg(t.key.alg)
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
- if h.buckets == nil {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- h.buckets = newarray(t.bucket, 1)
- }
- again:
- bucket := hash & (uintptr(1)<<h.B - 1)
- if h.oldbuckets != nil {
- growWork(t, h, bucket)
- }
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- var inserti *uint8
- var insertk unsafe.Pointer
- var insertv unsafe.Pointer
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- if b.tophash[i] == empty && inserti == nil {
- inserti = &b.tophash[i]
- insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- }
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if !alg.equal(key, k2, uintptr(t.key.size)) {
- continue
- }
- // already have a mapping for key. Update it.
- memmove(k2, key, uintptr(t.key.size))
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
- v2 := v
- if t.indirectvalue {
- v2 = *((*unsafe.Pointer)(v2))
- }
- memmove(v2, val, uintptr(t.elem.size))
- return
- }
- ovf := b.overflow(t)
- if ovf == nil {
- break
- }
- b = ovf
- }
- // did not find mapping for key. Allocate new cell & add entry.
- if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
- hashGrow(t, h)
- goto again // Growing the table invalidates everything, so try again
- }
- if inserti == nil {
- // all current buckets are full, allocate a new one.
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- newb := (*bmap)(newobject(t.bucket))
- b.setoverflow(t, newb)
- inserti = &newb.tophash[0]
- insertk = add(unsafe.Pointer(newb), dataOffset)
- insertv = add(insertk, bucketCnt*uintptr(t.keysize))
- }
- // store new key/value at insert position
- if t.indirectkey {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- kmem := newobject(t.key)
- *(*unsafe.Pointer)(insertk) = kmem
- insertk = kmem
- }
- if t.indirectvalue {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- vmem := newobject(t.elem)
- *(*unsafe.Pointer)(insertv) = vmem
- insertv = vmem
- }
- memmove(insertk, key, uintptr(t.key.size))
- memmove(insertv, val, uintptr(t.elem.size))
- *inserti = top
- h.count++
- }
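- // Illustrative sketch (not part of the original file): the shape of the insert
- // path above, restated over a toy chained bucket so the control flow is easier
- // to follow. exampleBucket and exampleInsert are hypothetical and skip
- // hashing, growing, and indirect keys/values; top is assumed to already be a
- // valid tophash (>= minTopHash).
- type exampleBucket struct {
- 	tophash  [bucketCnt]uint8
- 	keys     [bucketCnt]string
- 	values   [bucketCnt]int
- 	overflow *exampleBucket
- }
- func exampleInsert(b *exampleBucket, top uint8, key string, val int) {
- 	var inserti *uint8
- 	var home *exampleBucket
- 	var slot int
- 	for {
- 		for i := 0; i < bucketCnt; i++ {
- 			if b.tophash[i] != top {
- 				// Remember the first empty cell in case the key turns out to be new.
- 				if b.tophash[i] == empty && inserti == nil {
- 					inserti, home, slot = &b.tophash[i], b, i
- 				}
- 				continue
- 			}
- 			if b.keys[i] == key {
- 				// Existing key: update in place. (The real code also overwrites
- 				// the key, to handle equal-but-not-identical keys such as ±0.0.)
- 				b.values[i] = val
- 				return
- 			}
- 		}
- 		if b.overflow == nil {
- 			break
- 		}
- 		b = b.overflow
- 	}
- 	if inserti == nil {
- 		// Every cell in the chain is in use: chain on a fresh overflow bucket.
- 		nb := &exampleBucket{}
- 		b.overflow = nb
- 		inserti, home, slot = &nb.tophash[0], nb, 0
- 	}
- 	home.keys[slot], home.values[slot] = key, val
- 	*inserti = top
- }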
- func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- pc := funcPC(mapdelete)
- racewritepc(unsafe.Pointer(h), callerpc, pc)
- raceReadObjectPC(t.key, key, callerpc, pc)
- }
- if h == nil || h.count == 0 {
- return
- }
- alg := goalg(t.key.alg)
- hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
- bucket := hash & (uintptr(1)<<h.B - 1)
- if h.oldbuckets != nil {
- growWork(t, h, bucket)
- }
- b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
- top := uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- for {
- for i := uintptr(0); i < bucketCnt; i++ {
- if b.tophash[i] != top {
- continue
- }
- k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if !alg.equal(key, k2, uintptr(t.key.size)) {
- continue
- }
- memclr(k, uintptr(t.keysize))
- v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
- memclr(v, uintptr(t.valuesize))
- b.tophash[i] = empty
- h.count--
- return
- }
- b = b.overflow(t)
- if b == nil {
- return
- }
- }
- }
- func mapiterinit(t *maptype, h *hmap, it *hiter) {
- // Clear pointer fields so garbage collector does not complain.
- it.key = nil
- it.value = nil
- it.t = nil
- it.h = nil
- it.buckets = nil
- it.bptr = nil
- if raceenabled && h != nil {
- callerpc := getcallerpc(unsafe.Pointer(&t))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
- }
- if h == nil || h.count == 0 {
- it.key = nil
- it.value = nil
- return
- }
- if unsafe.Sizeof(hiter{})/ptrSize != 10 {
- gothrow("hash_iter size incorrect") // see ../../cmd/gc/reflect.c
- }
- it.t = t
- it.h = h
- // grab snapshot of bucket state
- it.B = h.B
- it.buckets = h.buckets
- // decide where to start
- r := uintptr(fastrand1())
- if h.B > 31-bucketCntBits {
- r += uintptr(fastrand1()) << 31
- }
- it.startBucket = r & (uintptr(1)<<h.B - 1)
- it.offset = uint8(r >> h.B & (bucketCnt - 1))
- // iterator state
- it.bucket = it.startBucket
- it.wrapped = false
- it.bptr = nil
- // Remember we have an iterator.
- // Can run concurrently with another hash_iter_init().
- for {
- old := h.flags
- if old == old|iterator|oldIterator {
- break
- }
- if cas(&h.flags, old, old|iterator|oldIterator) {
- break
- }
- }
- mapiternext(it)
- }
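- // Illustrative sketch (not part of the original file): how the random r above
- // is split into a starting bucket and an intra-bucket offset, which is what
- // makes range-over-map order differ from run to run. exampleIterStart is a
- // hypothetical helper mirroring the two masks in mapiterinit.
- func exampleIterStart(r uintptr, B uint8) (startBucket uintptr, offset uint8) {
- 	startBucket = r & (uintptr(1)<<B - 1)    // low B bits: which bucket to begin at
- 	offset = uint8(r >> B & (bucketCnt - 1)) // next bucketCntBits bits: which cell inside each bucket
- 	return
- }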
- func mapiternext(it *hiter) {
- h := it.h
- if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&it))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
- }
- t := it.t
- bucket := it.bucket
- b := it.bptr
- i := it.i
- checkBucket := it.checkBucket
- alg := goalg(t.key.alg)
- next:
- if b == nil {
- if bucket == it.startBucket && it.wrapped {
- // end of iteration
- it.key = nil
- it.value = nil
- return
- }
- if h.oldbuckets != nil && it.B == h.B {
- // Iterator was started in the middle of a grow, and the grow isn't done yet.
- // If the bucket we're looking at hasn't been filled in yet (i.e. the old
- // bucket hasn't been evacuated) then we need to iterate through the old
- // bucket and only return the ones that will be migrated to this bucket.
- oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
- b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- if !evacuated(b) {
- checkBucket = bucket
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
- checkBucket = noCheck
- }
- } else {
- b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
- checkBucket = noCheck
- }
- bucket++
- if bucket == uintptr(1)<<it.B {
- bucket = 0
- it.wrapped = true
- }
- i = 0
- }
- for ; i < bucketCnt; i++ {
- offi := (i + it.offset) & (bucketCnt - 1)
- k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
- v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
- if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
- if checkBucket != noCheck {
- // Special case: iterator was started during a grow and the
- // grow is not done yet. We're working on a bucket whose
- // oldbucket has not been evacuated yet. Or at least, it wasn't
- // evacuated when we started the bucket. So we're iterating
- // through the oldbucket, skipping any keys that will go
- // to the other new bucket (each oldbucket expands to two
- // buckets during a grow).
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if alg.equal(k2, k2, uintptr(t.key.size)) {
- // If the item in the oldbucket is not destined for
- // the current new bucket in the iteration, skip it.
- hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
- if hash&(uintptr(1)<<it.B-1) != checkBucket {
- continue
- }
- } else {
- // Hash isn't repeatable if k != k (NaNs). We need a
- // repeatable and randomish choice of which direction
- // to send NaNs during evacuation. We'll use the low
- // bit of tophash to decide which way NaNs go.
- // NOTE: this case is why we need two evacuate tophash
- // values, evacuatedX and evacuatedY, that differ in
- // their low bit.
- if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
- continue
- }
- }
- }
- if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
- // this is the golden data, we can return it.
- if t.indirectkey {
- k = *((*unsafe.Pointer)(k))
- }
- it.key = k
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- it.value = v
- } else {
- // The hash table has grown since the iterator was started.
- // The golden data for this key is now somewhere else.
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- if alg.equal(k2, k2, uintptr(t.key.size)) {
- // Check the current hash table for the data.
- // This code handles the case where the key
- // has been deleted, updated, or deleted and reinserted.
- // NOTE: we need to regrab the key as it has potentially been
- // updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
- rk, rv := mapaccessK(t, h, k2)
- if rk == nil {
- continue // key has been deleted
- }
- it.key = rk
- it.value = rv
- } else {
- // if key!=key then the entry can't be deleted or
- // updated, so we can just return it. That's lucky for
- // us because when key!=key we can't look it up
- // successfully in the current table.
- it.key = k2
- if t.indirectvalue {
- v = *((*unsafe.Pointer)(v))
- }
- it.value = v
- }
- }
- it.bucket = bucket
- it.bptr = b
- it.i = i + 1
- it.checkBucket = checkBucket
- return
- }
- }
- b = b.overflow(t)
- i = 0
- goto next
- }
- func hashGrow(t *maptype, h *hmap) {
- if h.oldbuckets != nil {
- gothrow("evacuation not done in time")
- }
- oldbuckets := h.buckets
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
- flags := h.flags &^ (iterator | oldIterator)
- if h.flags&iterator != 0 {
- flags |= oldIterator
- }
- // commit the grow (atomic wrt gc)
- h.B++
- h.flags = flags
- h.oldbuckets = oldbuckets
- h.buckets = newbuckets
- h.nevacuate = 0
- // the actual copying of the hash table data is done incrementally
- // by growWork() and evacuate().
- }
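- // Illustrative sketch (not part of the original file): the flag handling in
- // hashGrow, pulled out on its own. Any iterator that was walking h.buckets is,
- // after the commit above, walking what has just become h.oldbuckets, so the
- // iterator bit is carried over to oldIterator and cleared for the new array.
- // exampleGrowFlags is a hypothetical helper with the same bit manipulation.
- func exampleGrowFlags(flags uint32) uint32 {
- 	out := flags &^ (iterator | oldIterator)
- 	if flags&iterator != 0 {
- 		out |= oldIterator
- 	}
- 	return out
- }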
- func growWork(t *maptype, h *hmap, bucket uintptr) {
- noldbuckets := uintptr(1) << (h.B - 1)
- // make sure we evacuate the oldbucket corresponding
- // to the bucket we're about to use
- evacuate(t, h, bucket&(noldbuckets-1))
- // evacuate one more oldbucket to make progress on growing
- if h.oldbuckets != nil {
- evacuate(t, h, h.nevacuate)
- }
- }
- func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
- b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- newbit := uintptr(1) << (h.B - 1)
- alg := goalg(t.key.alg)
- if !evacuated(b) {
- // TODO: reuse overflow buckets instead of using new ones, if there
- // is no iterator using the old buckets. (If !oldIterator.)
- x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
- y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
- xi := 0
- yi := 0
- xk := add(unsafe.Pointer(x), dataOffset)
- yk := add(unsafe.Pointer(y), dataOffset)
- xv := add(xk, bucketCnt*uintptr(t.keysize))
- yv := add(yk, bucketCnt*uintptr(t.keysize))
- for ; b != nil; b = b.overflow(t) {
- k := add(unsafe.Pointer(b), dataOffset)
- v := add(k, bucketCnt*uintptr(t.keysize))
- for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
- top := b.tophash[i]
- if top == empty {
- b.tophash[i] = evacuatedEmpty
- continue
- }
- if top < minTopHash {
- gothrow("bad map state")
- }
- k2 := k
- if t.indirectkey {
- k2 = *((*unsafe.Pointer)(k2))
- }
- // Compute hash to make our evacuation decision (whether we need
- // to send this key/value to bucket x or bucket y).
- hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
- if h.flags&iterator != 0 {
- if !alg.equal(k2, k2, uintptr(t.key.size)) {
- // If key != key (NaNs), then the hash could be (and probably
- // will be) entirely different from the old hash. Moreover,
- // it isn't reproducible. Reproducibility is required in the
- // presence of iterators, as our evacuation decision must
- // match whatever decision the iterator made.
- // Fortunately, we have the freedom to send these keys either
- // way. Also, tophash is meaningless for these kinds of keys.
- // We let the low bit of tophash drive the evacuation decision.
- // We recompute a new random tophash for the next level so
- // these keys will get evenly distributed across all buckets
- // after multiple grows.
- if (top & 1) != 0 {
- hash |= newbit
- } else {
- hash &^= newbit
- }
- top = uint8(hash >> (ptrSize*8 - 8))
- if top < minTopHash {
- top += minTopHash
- }
- }
- }
- if (hash & newbit) == 0 {
- b.tophash[i] = evacuatedX
- if xi == bucketCnt {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- newx := (*bmap)(newobject(t.bucket))
- x.setoverflow(t, newx)
- x = newx
- xi = 0
- xk = add(unsafe.Pointer(x), dataOffset)
- xv = add(xk, bucketCnt*uintptr(t.keysize))
- }
- x.tophash[xi] = top
- if t.indirectkey {
- *(*unsafe.Pointer)(xk) = k2 // copy pointer
- } else {
- memmove(xk, k, uintptr(t.key.size)) // copy value
- }
- if t.indirectvalue {
- *(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
- } else {
- memmove(xv, v, uintptr(t.elem.size))
- }
- xi++
- xk = add(xk, uintptr(t.keysize))
- xv = add(xv, uintptr(t.valuesize))
- } else {
- b.tophash[i] = evacuatedY
- if yi == bucketCnt {
- if checkgc {
- memstats.next_gc = memstats.heap_alloc
- }
- newy := (*bmap)(newobject(t.bucket))
- y.setoverflow(t, newy)
- y = newy
- yi = 0
- yk = add(unsafe.Pointer(y), dataOffset)
- yv = add(yk, bucketCnt*uintptr(t.keysize))
- }
- y.tophash[yi] = top
- if t.indirectkey {
- *(*unsafe.Pointer)(yk) = k2
- } else {
- memmove(yk, k, uintptr(t.key.size))
- }
- if t.indirectvalue {
- *(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
- } else {
- memmove(yv, v, uintptr(t.elem.size))
- }
- yi++
- yk = add(yk, uintptr(t.keysize))
- yv = add(yv, uintptr(t.valuesize))
- }
- }
- }
- // Unlink the overflow buckets & clear key/value to help GC.
- if h.flags&oldIterator == 0 {
- b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
- memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
- }
- }
- // Advance evacuation mark
- if oldbucket == h.nevacuate {
- h.nevacuate = oldbucket + 1
- if oldbucket+1 == newbit { // newbit == # of oldbuckets
- // Growing is all done. Free old main bucket array.
- h.oldbuckets = nil
- }
- }
- }
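- // Illustrative sketch (not part of the original file): the X/Y split decision
- // made in evacuate. When the table doubles, old bucket i feeds exactly two new
- // buckets, i and i+newbit, and the newly exposed hash bit picks between them.
- // exampleEvacDest is a hypothetical helper with the same arithmetic.
- func exampleEvacDest(hash, oldbucket, newbit uintptr) uintptr {
- 	if hash&newbit == 0 {
- 		return oldbucket // "X": same index in the bigger table
- 	}
- 	return oldbucket + newbit // "Y": upper half of the bigger table
- }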
- func ismapkey(t *_type) bool {
- return goalg(t.alg).hash != nil
- }
- // Reflect stubs. Called from ../reflect/asm_*.s
- func reflect_makemap(t *maptype) *hmap {
- return makemap(t, 0)
- }
- func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- val, ok := mapaccess2(t, h, key)
- if !ok {
- // reflect wants nil for a missing element
- val = nil
- }
- return val
- }
- func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
- mapassign1(t, h, key, val)
- }
- func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
- mapdelete(t, h, key)
- }
- func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
- it := new(hiter)
- mapiterinit(t, h, it)
- return it
- }
- func reflect_mapiternext(it *hiter) {
- mapiternext(it)
- }
- func reflect_mapiterkey(it *hiter) unsafe.Pointer {
- return it.key
- }
- func reflect_maplen(h *hmap) int {
- if h == nil {
- return 0
- }
- if raceenabled {
- callerpc := getcallerpc(unsafe.Pointer(&h))
- racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
- }
- return h.count
- }
- func reflect_ismapkey(t *_type) bool {
- return ismapkey(t)
- }