12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
3704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961 |
- package runtime
- import (
- "unsafe"
- )
const (
	// Maximum number of key/value pairs a bucket can hold.
	bucketCntBits = 3
	bucketCnt     = 1 << bucketCntBits

	// Maximum average load of a bucket that triggers growth
	// (compared against count / 2^B in mapassign1 and makemap).
	loadFactor = 6.5

	// Maximum key and value sizes that are stored inline in a bucket.
	// Larger keys/values are allocated separately and the bucket holds
	// a pointer instead (t.indirectkey / t.indirectvalue); the checks in
	// makemap enforce this agreement with the compiler.
	maxKeySize   = 128
	maxValueSize = 128

	// dataOffset is the offset of the key array within a bucket: the size
	// of bmap rounded up to 64-bit alignment (forced here by the int64
	// field, so the layout is correct even on platforms with 32-bit
	// pointers but 64-bit alignment requirements).
	dataOffset = unsafe.Offsetof(struct {
		b bmap
		v int64
	}{}.v)

	// Possible tophash values. Values below minTopHash are reserved
	// markers; real hash top-bytes are bumped above minTopHash
	// (see the `top += minTopHash` adjustments in the access paths).
	empty          = 0 // cell is empty
	evacuatedEmpty = 1 // cell is empty, and the bucket has been evacuated
	evacuatedX     = 2 // entry was evacuated to the first half of the larger table
	evacuatedY     = 3 // entry was evacuated to the second half of the larger table
	minTopHash     = 4 // minimum tophash for a normal, filled cell

	// hmap.flags bits.
	iterator    = 1 // there may be an iterator using buckets
	oldIterator = 2 // there may be an iterator using oldbuckets

	// Sentinel bucket index meaning "no bucket check needed" during
	// iteration over a growing map (all ones in a uintptr).
	noCheck = 1<<(8*ptrSize) - 1

	// When true, trigger a garbage collection at every allocation made
	// from this code, to shake out GC interaction bugs.
	checkgc = false
)
// hmap is the header for a Go map. Its size must match the compiler's
// t.hmap.size (checked in makemap).
type hmap struct {
	count      int            // number of live entries (== len(map))
	flags      uint32         // iterator/oldIterator bits
	hash0      uint32         // hash seed, mixed into every key hash
	B          uint8          // log_2 of the number of buckets
	buckets    unsafe.Pointer // array of 2^B buckets; may be nil until first assignment
	oldbuckets unsafe.Pointer // previous bucket array of half the size; non-nil only while growing
	nevacuate  uintptr        // evacuation progress: buckets below this index have been evacuated
}
// bmap is a map bucket. Only the tophash array is declared here; the
// compiler lays out the rest of the bucket after it:
//
//	bucketCnt keys, then bucketCnt values (accessed via dataOffset and
//	t.keysize/t.valuesize arithmetic), then an overflow *bmap pointer in
//	the last regSize bytes (see overflow/setoverflow).
//
// Keys and values are grouped rather than interleaved, which avoids
// padding between each key/value pair.
type bmap struct {
	// tophash holds the top byte of each entry's hash (bumped above
	// minTopHash), or one of the empty/evacuated* marker values.
	tophash [bucketCnt]uint8
}
// hiter is the map iteration state handed to range loops.
// Its size in pointers must be exactly 10 (checked in mapiterinit), so
// field order and types must stay in sync with the compiler's layout.
type hiter struct {
	key         unsafe.Pointer // current key; nil signals end of iteration
	value       unsafe.Pointer // current value
	t           *maptype
	h           *hmap
	buckets     unsafe.Pointer // bucket array snapshot taken at init time
	bptr        *bmap          // current bucket being scanned
	startBucket uintptr        // bucket iteration started at (randomized)
	offset      uint8          // randomized intra-bucket start offset
	wrapped     bool           // true once iteration has wrapped past the end of the array
	B           uint8          // h.B at init time; used to detect growth during iteration
	i           uint8          // next cell index within the current bucket
	bucket      uintptr        // next bucket index
	checkBucket uintptr        // bucket to filter against while growing, or noCheck
}
- func evacuated(b *bmap) bool {
- h := b.tophash[0]
- return h > empty && h < minTopHash
- }
- func (b *bmap) overflow(t *maptype) *bmap {
- return *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize))
- }
- func (b *bmap) setoverflow(t *maptype, ovf *bmap) {
- *(**bmap)(add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)) = ovf
- }
// makemap implements make(map[k]v, hint).
// It first cross-checks the compiler-generated map type layout against
// this file's assumptions (throwing on any mismatch), then sizes the
// initial bucket array so that hint entries fit without exceeding
// loadFactor, and returns a freshly initialized hmap. For small maps
// (B == 0... only when hint fits in one bucket) the bucket array is
// allocated lazily on first assignment (h.buckets stays nil).
func makemap(t *maptype, hint int64) *hmap {
	if sz := unsafe.Sizeof(hmap{}); sz > 48 || sz != uintptr(t.hmap.size) {
		gothrow("bad hmap size")
	}

	// Negative or int32-overflowing hints are programmer errors.
	if hint < 0 || int64(int32(hint)) != hint {
		panic("makemap: size out of range")
	}

	if !ismapkey(t.key) {
		gothrow("runtime.makemap: unsupported map key type")
	}

	// Verify the compiler's inline/indirect decision for keys and values
	// matches the maxKeySize/maxValueSize thresholds used here.
	if t.key.size > maxKeySize && (!t.indirectkey || t.keysize != uint8(ptrSize)) ||
		t.key.size <= maxKeySize && (t.indirectkey || t.keysize != uint8(t.key.size)) {
		gothrow("key size wrong")
	}
	if t.elem.size > maxValueSize && (!t.indirectvalue || t.valuesize != uint8(ptrSize)) ||
		t.elem.size <= maxValueSize && (t.indirectvalue || t.valuesize != uint8(t.elem.size)) {
		gothrow("value size wrong")
	}

	// Alignment invariants the bucket layout depends on.
	if t.key.align > bucketCnt {
		gothrow("key align too big")
	}
	if t.elem.align > bucketCnt {
		gothrow("value align too big")
	}
	if uintptr(t.key.size)%uintptr(t.key.align) != 0 {
		gothrow("key size not a multiple of key align")
	}
	if uintptr(t.elem.size)%uintptr(t.elem.align) != 0 {
		gothrow("value size not a multiple of value align")
	}
	if bucketCnt < 8 {
		gothrow("bucketsize too small for proper alignment")
	}
	if dataOffset%uintptr(t.key.align) != 0 {
		gothrow("need padding in bucket (key)")
	}
	if dataOffset%uintptr(t.elem.align) != 0 {
		gothrow("need padding in bucket (value)")
	}

	// Find the smallest B such that hint entries fit at the load factor.
	B := uint8(0)
	for ; hint > bucketCnt && float32(hint) > loadFactor*float32(uintptr(1)<<B); B++ {
	}

	// Allocate the initial bucket array up front only if more than one
	// bucket is needed; otherwise defer to the first mapassign1.
	var buckets unsafe.Pointer
	if B != 0 {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		buckets = newarray(t.bucket, uintptr(1)<<B)
	}

	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	h := (*hmap)(newobject(t.hmap))
	h.count = 0
	h.B = B
	h.flags = 0
	h.hash0 = fastrand1() // per-map hash seed
	h.buckets = buckets
	h.oldbuckets = nil
	h.nevacuate = 0
	return h
}
// mapaccess1 returns a pointer to h[key]. It never returns nil:
// if the key is absent (or the map is nil/empty) it returns a pointer
// to the zero object for the value type, so callers can always
// dereference the result.
func mapaccess1(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess1)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	// Bucket index is the low B bits of the hash.
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		// Map is growing: if the corresponding old bucket (half-size
		// mask) has not been evacuated yet, the data still lives there.
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	// Top byte of the hash, bumped past the reserved marker values.
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			// tophash is a cheap one-byte filter before the full key compare.
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v
			}
		}
		// Not in this bucket; follow the overflow chain.
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}
// mapaccess2 is like mapaccess1 but also reports whether the key was
// present (the `v, ok := m[k]` form). On a miss it returns a pointer to
// the value type's zero object and false.
func mapaccess2(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapaccess2)
		racereadpc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	// Bucket index is the low B bits of the hash.
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		// Map is growing: consult the not-yet-evacuated old bucket.
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	// Top byte of the hash, bumped past the reserved marker values.
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return v, true
			}
		}
		// Not in this bucket; follow the overflow chain.
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}
// mapaccessK returns pointers to both the stored key and the value for
// key, or (nil, nil) if absent. It is used by mapiternext to re-locate
// an entry (and its canonical key) when iterating over a map that is
// being grown. No race instrumentation: callers are internal.
func mapaccessK(t *maptype, h *hmap, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer) {
	if h == nil || h.count == 0 {
		return nil, nil
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	// Bucket index is the low B bits of the hash.
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		// Map is growing: consult the not-yet-evacuated old bucket.
		oldb := (*bmap)(unsafe.Pointer(uintptr(c) + (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	// Top byte of the hash, bumped past the reserved marker values.
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			if t.indirectkey {
				k = *((*unsafe.Pointer)(k))
			}
			if alg.equal(key, k, uintptr(t.key.size)) {
				v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				return k, v
			}
		}
		// Not in this bucket; follow the overflow chain.
		b = b.overflow(t)
		if b == nil {
			return nil, nil
		}
	}
}
// mapassign1 implements m[key] = val. If the key already exists its
// value is overwritten in place; otherwise a new cell is claimed,
// growing the table (and retrying via `goto again`) when the load
// factor would be exceeded, or chaining a new overflow bucket when the
// target bucket is full.
func mapassign1(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	if h == nil {
		panic("assignment to entry in nil map")
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapassign1)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
		raceReadObjectPC(t.elem, val, callerpc, pc)
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))

	// Lazily allocate the bucket array (makemap defers it when B == 0).
	if h.buckets == nil {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		h.buckets = newarray(t.bucket, 1)
	}

again:
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		// Help the incremental grow along before writing.
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	// Top byte of the hash, bumped past the reserved marker values.
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}

	// First empty cell found while scanning, in case the key is new.
	var inserti *uint8
	var insertk unsafe.Pointer
	var insertv unsafe.Pointer
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				// Remember the first empty slot but keep scanning: the
				// key might still exist later in the chain.
				if b.tophash[i] == empty && inserti == nil {
					inserti = &b.tophash[i]
					insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
					insertv = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
				}
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2, uintptr(t.key.size)) {
				continue
			}
			// Key already present: update key bytes and value in place.
			memmove(k2, key, uintptr(t.key.size))
			v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
			v2 := v
			if t.indirectvalue {
				v2 = *((*unsafe.Pointer)(v2))
			}
			memmove(v2, val, uintptr(t.elem.size))
			return
		}
		ovf := b.overflow(t)
		if ovf == nil {
			break
		}
		b = ovf
	}

	// Key not found. Grow first if over the load factor, then redo the
	// whole lookup against the new table.
	if float32(h.count) >= loadFactor*float32((uintptr(1)<<h.B)) && h.count >= bucketCnt {
		hashGrow(t, h)
		goto again
	}

	// No free cell anywhere in the chain: append a new overflow bucket.
	if inserti == nil {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		newb := (*bmap)(newobject(t.bucket))
		b.setoverflow(t, newb)
		inserti = &newb.tophash[0]
		insertk = add(unsafe.Pointer(newb), dataOffset)
		insertv = add(insertk, bucketCnt*uintptr(t.keysize))
	}

	// For out-of-line keys/values, allocate storage and store the
	// pointer in the bucket cell.
	if t.indirectkey {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		kmem := newobject(t.key)
		*(*unsafe.Pointer)(insertk) = kmem
		insertk = kmem
	}
	if t.indirectvalue {
		if checkgc {
			memstats.next_gc = memstats.heap_alloc
		}
		vmem := newobject(t.elem)
		*(*unsafe.Pointer)(insertv) = vmem
		insertv = vmem
	}
	memmove(insertk, key, uintptr(t.key.size))
	memmove(insertv, val, uintptr(t.elem.size))
	*inserti = top // publish the cell last
	h.count++
}
// mapdelete implements delete(m, key). Deleting a missing key (or from
// a nil/empty map) is a no-op. The key and value bytes in the bucket
// are cleared and the cell's tophash is marked empty.
func mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		pc := funcPC(mapdelete)
		racewritepc(unsafe.Pointer(h), callerpc, pc)
		raceReadObjectPC(t.key, key, callerpc, pc)
	}
	if h == nil || h.count == 0 {
		return
	}
	alg := goalg(t.key.alg)
	hash := alg.hash(key, uintptr(t.key.size), uintptr(h.hash0))
	bucket := hash & (uintptr(1)<<h.B - 1)
	if h.oldbuckets != nil {
		// Help the incremental grow along before writing.
		growWork(t, h, bucket)
	}
	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
	// Top byte of the hash, bumped past the reserved marker values.
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			if b.tophash[i] != top {
				continue
			}
			k := add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
			k2 := k
			if t.indirectkey {
				k2 = *((*unsafe.Pointer)(k2))
			}
			if !alg.equal(key, k2, uintptr(t.key.size)) {
				continue
			}
			// Clear the in-bucket key/value cells (t.keysize/t.valuesize
			// bytes: the pointer for indirect entries, the data itself
			// otherwise) and mark the cell empty.
			memclr(k, uintptr(t.keysize))
			v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*uintptr(t.keysize) + i*uintptr(t.valuesize))
			memclr(v, uintptr(t.valuesize))
			b.tophash[i] = empty
			h.count--
			return
		}
		b = b.overflow(t)
		if b == nil {
			return
		}
	}
}
// mapiterinit initializes the hiter struct used for ranging over maps
// and advances it to the first entry (via mapiternext). The start
// bucket and intra-bucket offset are randomized so iteration order is
// not stable. Both iterator flags are set on the map so grow code knows
// not to reuse old bucket memory while an iterator may reference it.
func mapiterinit(t *maptype, h *hmap, it *hiter) {
	// Clear pointer fields first so a partially-initialized iterator
	// never holds stale pointers.
	it.key = nil
	it.value = nil
	it.t = nil
	it.h = nil
	it.buckets = nil
	it.bptr = nil

	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiterinit))
	}

	if h == nil || h.count == 0 {
		it.key = nil
		it.value = nil
		return
	}

	// The compiler allocates hiter as exactly 10 pointer-sized words;
	// fail loudly if this file's layout drifts from that.
	if unsafe.Sizeof(hiter{})/ptrSize != 10 {
		gothrow("hash_iter size incorrect")
	}
	it.t = t
	it.h = h

	// Snapshot the bucket state.
	it.B = h.B
	it.buckets = h.buckets

	// Decide where to start. fastrand1 yields 31 bits... presumably;
	// a second call widens r when B is large enough that B+bucketCntBits
	// exceeds 31 bits (TODO confirm fastrand1's range against its impl).
	r := uintptr(fastrand1())
	if h.B > 31-bucketCntBits {
		r += uintptr(fastrand1()) << 31
	}
	it.startBucket = r & (uintptr(1)<<h.B - 1)
	it.offset = uint8(r >> h.B & (bucketCnt - 1))

	// Iterator state.
	it.bucket = it.startBucket
	it.wrapped = false
	it.bptr = nil

	// Set both iterator flags with CAS so a concurrent hashGrow (which
	// also touches h.flags) is not clobbered.
	for {
		old := h.flags
		if old == old|iterator|oldIterator {
			break
		}
		if cas(&h.flags, old, old|iterator|oldIterator) {
			break
		}
	}

	mapiternext(it)
}
// mapiternext advances it to the next entry, storing its key/value
// pointers in it.key/it.value, or setting it.key = nil when iteration
// is complete. The hard part is iterating while the map is growing:
// the iterator walks the *new* bucket indices, and for an old bucket
// that has not been evacuated yet it scans the old bucket but filters
// to only those entries that will evacuate to the current new bucket
// (checkBucket), so each entry is yielded exactly once.
func mapiternext(it *hiter) {
	h := it.h
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&it))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapiternext))
	}
	t := it.t
	bucket := it.bucket
	b := it.bptr
	i := it.i
	checkBucket := it.checkBucket
	alg := goalg(t.key.alg)

next:
	if b == nil {
		if bucket == it.startBucket && it.wrapped {
			// Back at the start after wrapping: done.
			it.key = nil
			it.value = nil
			return
		}
		if h.oldbuckets != nil && it.B == h.B {
			// The map is growing and the iterator was started before
			// (or during) the same grow. The old bucket for this new
			// bucket index is the low B-1 bits.
			oldbucket := bucket & (uintptr(1)<<(it.B-1) - 1)
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			if !evacuated(b) {
				// Scan the old bucket, filtering to entries destined
				// for this new bucket.
				checkBucket = bucket
			} else {
				// Old bucket already moved; scan the new bucket.
				b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
				checkBucket = noCheck
			}
		} else {
			b = (*bmap)(add(it.buckets, bucket*uintptr(t.bucketsize)))
			checkBucket = noCheck
		}
		bucket++
		if bucket == uintptr(1)<<it.B {
			bucket = 0
			it.wrapped = true
		}
		i = 0
	}
	for ; i < bucketCnt; i++ {
		// Start each bucket at the randomized offset chosen at init.
		offi := (i + it.offset) & (bucketCnt - 1)
		k := add(unsafe.Pointer(b), dataOffset+uintptr(offi)*uintptr(t.keysize))
		v := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.valuesize))
		if b.tophash[offi] != empty && b.tophash[offi] != evacuatedEmpty {
			if checkBucket != noCheck {
				// Scanning an unevacuated old bucket: only yield
				// entries that will end up in checkBucket after the
				// grow completes.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if alg.equal(k2, k2, uintptr(t.key.size)) {
					// Key is reproducibly hashable (k == k itself, i.e.
					// not a NaN-like key): recompute its destination
					// bucket in the grown table and filter on it.
					hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
					if hash&(uintptr(1)<<it.B-1) != checkBucket {
						continue
					}
				} else {
					// Key != itself (e.g. a NaN float key): its hash is
					// not reproducible, so evacuation sends it to X or
					// Y based on the low bit of its tophash (see the
					// matching trick in evacuate). Apply the same rule
					// here: the top bit of checkBucket selects X vs Y.
					if checkBucket>>(it.B-1) != uintptr(b.tophash[offi]&1) {
						continue
					}
				}
			}
			if b.tophash[offi] != evacuatedX && b.tophash[offi] != evacuatedY {
				// Entry is in place; yield pointers directly into the bucket.
				if t.indirectkey {
					k = *((*unsafe.Pointer)(k))
				}
				it.key = k
				if t.indirectvalue {
					v = *((*unsafe.Pointer)(v))
				}
				it.value = v
			} else {
				// Entry was evacuated while the iterator was stopped
				// here; the data now lives in the new table.
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}
				if alg.equal(k2, k2, uintptr(t.key.size)) {
					// Re-look the key up to find its current location
					// (it may also have been deleted or updated since).
					rk, rv := mapaccessK(t, h, k2)
					if rk == nil {
						continue // key was deleted
					}
					it.key = rk
					it.value = rv
				} else {
					// Irreproducible key: cannot be looked up again, so
					// return the (stale but valid) copy saved in the
					// old bucket.
					it.key = k2
					if t.indirectvalue {
						v = *((*unsafe.Pointer)(v))
					}
					it.value = v
				}
			}
			// Save resume state and return this entry.
			it.bucket = bucket
			it.bptr = b
			it.i = i + 1
			it.checkBucket = checkBucket
			return
		}
	}
	// Bucket exhausted; continue along the overflow chain, then the
	// next bucket index.
	b = b.overflow(t)
	i = 0
	goto next
}
// hashGrow starts an incremental grow: it allocates a bucket array of
// twice the size, moves the current array to oldbuckets, and resets the
// evacuation counter. The actual copying happens gradually via
// growWork/evacuate calls from mapassign1 and mapdelete.
func hashGrow(t *maptype, h *hmap) {
	if h.oldbuckets != nil {
		gothrow("evacuation not done in time")
	}
	oldbuckets := h.buckets
	if checkgc {
		memstats.next_gc = memstats.heap_alloc
	}
	newbuckets := newarray(t.bucket, uintptr(1)<<(h.B+1))
	// Transfer the iterator flag: an iterator over the current buckets
	// becomes an iterator over what is about to be oldbuckets.
	flags := h.flags &^ (iterator | oldIterator)
	if h.flags&iterator != 0 {
		flags |= oldIterator
	}

	// Commit the grow.
	h.B++
	h.flags = flags
	h.oldbuckets = oldbuckets
	h.buckets = newbuckets
	h.nevacuate = 0
}
// growWork performs a slice of the incremental grow on behalf of a
// write to bucket: it evacuates the old bucket that the write's bucket
// maps from, plus one more old bucket (h.nevacuate) so the overall
// grow is guaranteed to make forward progress.
func growWork(t *maptype, h *hmap, bucket uintptr) {
	noldbuckets := uintptr(1) << (h.B - 1)

	// Evacuate the old bucket corresponding to the one being written.
	evacuate(t, h, bucket&(noldbuckets-1))

	// Evacuate one more to advance the grow (oldbuckets may have been
	// released by the call above if it finished the job).
	if h.oldbuckets != nil {
		evacuate(t, h, h.nevacuate)
	}
}
// evacuate moves all entries of old bucket `oldbucket` (including its
// overflow chain) into the doubled table. Each entry goes to one of two
// destinations: bucket oldbucket ("x", new hash bit 0) or bucket
// oldbucket+newbit ("y", new hash bit 1). The old cells are stamped
// with evacuatedX/evacuatedY/evacuatedEmpty so concurrent-with-grow
// readers and iterators can tell where entries went. When oldbucket
// completes the frontier (h.nevacuate), the counter advances, and once
// every old bucket is done, oldbuckets is released.
func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
	b := (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
	newbit := uintptr(1) << (h.B - 1) // number of old buckets; also the new hash bit
	alg := goalg(t.key.alg)
	if !evacuated(b) {
		// Destination cursors: bucket pointer, cell index, key slot,
		// value slot for each of the x and y halves.
		x := (*bmap)(add(h.buckets, oldbucket*uintptr(t.bucketsize)))
		y := (*bmap)(add(h.buckets, (oldbucket+newbit)*uintptr(t.bucketsize)))
		xi := 0
		yi := 0
		xk := add(unsafe.Pointer(x), dataOffset)
		yk := add(unsafe.Pointer(y), dataOffset)
		xv := add(xk, bucketCnt*uintptr(t.keysize))
		yv := add(yk, bucketCnt*uintptr(t.keysize))
		for ; b != nil; b = b.overflow(t) {
			k := add(unsafe.Pointer(b), dataOffset)
			v := add(k, bucketCnt*uintptr(t.keysize))
			for i := 0; i < bucketCnt; i, k, v = i+1, add(k, uintptr(t.keysize)), add(v, uintptr(t.valuesize)) {
				top := b.tophash[i]
				if top == empty {
					b.tophash[i] = evacuatedEmpty
					continue
				}
				if top < minTopHash {
					// Cell already carries an evacuated* mark — the
					// bucket is in an impossible half-evacuated state.
					gothrow("bad map state")
				}
				k2 := k
				if t.indirectkey {
					k2 = *((*unsafe.Pointer)(k2))
				}

				// Recompute the hash to learn the new bit that decides
				// x vs y.
				hash := alg.hash(k2, uintptr(t.key.size), uintptr(h.hash0))
				if h.flags&iterator != 0 {
					if !alg.equal(k2, k2, uintptr(t.key.size)) {
						// Key != itself (e.g. a NaN float key): its hash
						// is not reproducible, but with a live iterator
						// the x/y decision must be one the iterator can
						// replay (see the matching check in
						// mapiternext). Use the low bit of the old
						// tophash to pick the destination, then derive a
						// fresh tophash from this (arbitrary) hash so
						// these keys keep spreading on future grows.
						if (top & 1) != 0 {
							hash |= newbit
						} else {
							hash &^= newbit
						}
						top = uint8(hash >> (ptrSize*8 - 8))
						if top < minTopHash {
							top += minTopHash
						}
					}
				}
				if (hash & newbit) == 0 {
					// Destination: x half (same bucket index).
					b.tophash[i] = evacuatedX
					if xi == bucketCnt {
						// Current x bucket full; chain a new overflow bucket.
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newx := (*bmap)(newobject(t.bucket))
						x.setoverflow(t, newx)
						x = newx
						xi = 0
						xk = add(unsafe.Pointer(x), dataOffset)
						xv = add(xk, bucketCnt*uintptr(t.keysize))
					}
					x.tophash[xi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(xk) = k2 // copy the pointer
					} else {
						memmove(xk, k, uintptr(t.key.size)) // copy the data
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(xv) = *(*unsafe.Pointer)(v)
					} else {
						memmove(xv, v, uintptr(t.elem.size))
					}
					xi++
					xk = add(xk, uintptr(t.keysize))
					xv = add(xv, uintptr(t.valuesize))
				} else {
					// Destination: y half (bucket index + newbit);
					// mirror image of the x case.
					b.tophash[i] = evacuatedY
					if yi == bucketCnt {
						if checkgc {
							memstats.next_gc = memstats.heap_alloc
						}
						newy := (*bmap)(newobject(t.bucket))
						y.setoverflow(t, newy)
						y = newy
						yi = 0
						yk = add(unsafe.Pointer(y), dataOffset)
						yv = add(yk, bucketCnt*uintptr(t.keysize))
					}
					y.tophash[yi] = top
					if t.indirectkey {
						*(*unsafe.Pointer)(yk) = k2
					} else {
						memmove(yk, k, uintptr(t.key.size))
					}
					if t.indirectvalue {
						*(*unsafe.Pointer)(yv) = *(*unsafe.Pointer)(v)
					} else {
						memmove(yv, v, uintptr(t.elem.size))
					}
					yi++
					yk = add(yk, uintptr(t.keysize))
					yv = add(yv, uintptr(t.valuesize))
				}
			}
		}
		// Unlink the overflow buckets and clear key/value data to help
		// GC — but only if no iterator may still be walking oldbuckets.
		if h.flags&oldIterator == 0 {
			b = (*bmap)(add(h.oldbuckets, oldbucket*uintptr(t.bucketsize)))
			memclr(add(unsafe.Pointer(b), dataOffset), uintptr(t.bucketsize)-dataOffset)
		}
	}

	// Advance the evacuation frontier; drop oldbuckets once the last
	// bucket is done, completing the grow.
	if oldbucket == h.nevacuate {
		h.nevacuate = oldbucket + 1
		if oldbucket+1 == newbit {
			h.oldbuckets = nil
		}
	}
}
- func ismapkey(t *_type) bool {
- return goalg(t.alg).hash != nil
- }
// reflect_makemap is the entry point for reflect.MakeMap: a makemap
// with no size hint.
func reflect_makemap(t *maptype) *hmap {
	return makemap(t, 0)
}
- func reflect_mapaccess(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
- val, ok := mapaccess2(t, h, key)
- if !ok {
-
- val = nil
- }
- return val
- }
// reflect_mapassign is the entry point for reflect map assignment;
// it delegates directly to mapassign1.
func reflect_mapassign(t *maptype, h *hmap, key unsafe.Pointer, val unsafe.Pointer) {
	mapassign1(t, h, key, val)
}
// reflect_mapdelete is the entry point for reflect map deletion;
// it delegates directly to mapdelete.
func reflect_mapdelete(t *maptype, h *hmap, key unsafe.Pointer) {
	mapdelete(t, h, key)
}
- func reflect_mapiterinit(t *maptype, h *hmap) *hiter {
- it := new(hiter)
- mapiterinit(t, h, it)
- return it
- }
// reflect_mapiternext advances a reflect map iterator;
// it delegates directly to mapiternext.
func reflect_mapiternext(it *hiter) {
	mapiternext(it)
}
// reflect_mapiterkey returns the iterator's current key pointer
// (nil once iteration is exhausted).
func reflect_mapiterkey(it *hiter) unsafe.Pointer {
	return it.key
}
// reflect_maplen returns the number of entries in the map for the
// reflect package; a nil map has length 0.
func reflect_maplen(h *hmap) int {
	if h == nil {
		return 0
	}
	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&h))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(reflect_maplen))
	}
	return h.count
}
// reflect_ismapkey reports to the reflect package whether t is a valid
// map key type; it delegates directly to ismapkey.
func reflect_ismapkey(t *_type) bool {
	return ismapkey(t)
}
|