hashmap_fast.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)
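
// This file contains the fast-path map lookup routines for the most
// common key types: uint32, uint64, and string. The compiler rewrites
// map accesses with these key types into calls to the specialized
// functions below instead of the generic mapaccess1/mapaccess2, which
// avoids indirect calls for hashing and key comparison.
//
// As an illustrative sketch (not the literal lowering), given m of
// type map[uint32]V, a lookup
//
//	v := m[k]
//
// compiles to roughly
//
//	v = *(*V)(mapaccess1_fast32(maptype, m, k))

// mapaccess1_fast32 looks up key in a map with uint32 keys. It returns
// a pointer to the value, or a pointer to the value type's zero value
// if the key is not present. It never returns nil.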
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
		// Mask the hash to pick one of the 2^B buckets.
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			// The map is growing. If the corresponding old bucket
			// has not been evacuated yet, the key is still there.
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	// Scan the bucket and its overflow chain.
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}
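
// mapaccess2_fast32 is like mapaccess1_fast32, but also reports
// whether the key was present in the map.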
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 4, uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}
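
// mapaccess1_fast64 is the uint64-key analogue of mapaccess1_fast32;
// key slots are 8 bytes instead of 4.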
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}
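
// mapaccess2_fast64 is like mapaccess1_fast64, but also reports
// whether the key was present in the map.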
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	var b *bmap
	if h.B == 0 {
		// One-bucket table. No need to hash.
		b = (*bmap)(h.buckets)
	} else {
		hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&key)), 8, uintptr(h.hash0))
		m := uintptr(1)<<h.B - 1
		b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
		if c := h.oldbuckets; c != nil {
			oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
			if !evacuated(oldb) {
				b = oldb
			}
		}
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
			if k != key {
				continue
			}
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}
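
// mapaccess1_faststr looks up ky in a map with string keys. For a
// one-bucket map it avoids hashing entirely: short keys are compared
// directly, while long keys are screened by length and by their first
// and last 4 bytes, falling through to the hash path only when more
// than one candidate survives.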
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero)
	}
	key := (*stringStruct)(unsafe.Pointer(&ky))
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
				}
			}
			return unsafe.Pointer(t.elem.zero)
		}
		// long key, try not to do more comparisons than necessary
		// keymaybe records the index of the only candidate seen so far;
		// bucketCnt means no candidate yet.
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
			}
			// check first 4 bytes
			// TODO: on amd64/386 at least, make this compile to one 4-byte comparison instead of
			// four 1-byte comparisons.
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			// Exactly one candidate survived the screening; verify it
			// with a full comparison.
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
			if memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize))
			}
		}
		return unsafe.Pointer(t.elem.zero)
	}
dohash:
	hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	// Top byte of the hash, adjusted so it never collides with the
	// reserved cell markers below minTopHash.
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize))
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero)
		}
	}
}
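
// mapaccess2_faststr is like mapaccess1_faststr, but also reports
// whether the key was present in the map.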
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
	if raceenabled && h != nil {
		callerpc := getcallerpc(unsafe.Pointer(&t))
		racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
	}
	if h == nil || h.count == 0 {
		return unsafe.Pointer(t.elem.zero), false
	}
	key := (*stringStruct)(unsafe.Pointer(&ky))
	if h.B == 0 {
		// One-bucket table.
		b := (*bmap)(h.buckets)
		if key.len < 32 {
			// short key, doing lots of comparisons is ok
			for i := uintptr(0); i < bucketCnt; i++ {
				x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
				if x == empty {
					continue
				}
				k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
				if k.len != key.len {
					continue
				}
				if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
					return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
				}
			}
			return unsafe.Pointer(t.elem.zero), false
		}
		// long key, try not to do more comparisons than necessary
		keymaybe := uintptr(bucketCnt)
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x == empty {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
			}
			// check first 4 bytes
			if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
				continue
			}
			// check last 4 bytes
			if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
				continue
			}
			if keymaybe != bucketCnt {
				// Two keys are potential matches. Use hash to distinguish them.
				goto dohash
			}
			keymaybe = i
		}
		if keymaybe != bucketCnt {
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*ptrSize))
			if memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+keymaybe*uintptr(t.valuesize)), true
			}
		}
		return unsafe.Pointer(t.elem.zero), false
	}
dohash:
	hash := goalg(t.key.alg).hash(noescape(unsafe.Pointer(&ky)), 2*ptrSize, uintptr(h.hash0))
	m := uintptr(1)<<h.B - 1
	b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
	if c := h.oldbuckets; c != nil {
		oldb := (*bmap)(add(c, (hash&(m>>1))*uintptr(t.bucketsize)))
		if !evacuated(oldb) {
			b = oldb
		}
	}
	top := uint8(hash >> (ptrSize*8 - 8))
	if top < minTopHash {
		top += minTopHash
	}
	for {
		for i := uintptr(0); i < bucketCnt; i++ {
			x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.topbits[i] without the bounds check
			if x != top {
				continue
			}
			k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*ptrSize))
			if k.len != key.len {
				continue
			}
			if k.str == key.str || memeq(k.str, key.str, uintptr(key.len)) {
				return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*ptrSize+i*uintptr(t.valuesize)), true
			}
		}
		b = b.overflow(t)
		if b == nil {
			return unsafe.Pointer(t.elem.zero), false
		}
	}
}