@@ -106,13 +106,24 @@ type hmap struct {
106
106
// Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
107
107
// ../reflect/type.go. Don't change this structure without also changing that code!
108
108
count int // # live cells == size of map. Must be first (used by len() builtin)
109
- flags uint32
110
- hash0 uint32 // hash seed
109
+ flags uint8
111
110
B uint8 // log_2 of # of buckets (can hold up to loadFactor * 2^B items)
111
+ hash0 uint32 // hash seed
112
112
113
113
buckets unsafe.Pointer // array of 2^B Buckets. may be nil if count==0.
114
114
oldbuckets unsafe.Pointer // previous bucket array of half the size, non-nil only when growing
115
115
nevacuate uintptr // progress counter for evacuation (buckets less than this have been evacuated)
116
+
117
+ // If both key and value do not contain pointers, then we mark bucket
118
+ // type as containing no pointers. This avoids scanning such maps.
119
+ // However, bmap.overflow is a pointer. In order to keep overflow buckets
120
+ // alive, we store pointers to all overflow buckets in hmap.overflow.
121
+ // Overflow is used only if key and value do not contain pointers.
122
+ // overflow[0] contains overflow buckets for hmap.buckets.
123
+ // overflow[1] contains overflow buckets for hmap.oldbuckets.
124
+ // The first indirection allows us to reduce static size of hmap.
125
+ // The second indirection allows to store a pointer to the slice in hiter.
126
+ overflow * [2 ]* []* bmap
116
127
}
117
128
118
129
// A bucket for a Go map.
@@ -135,6 +146,7 @@ type hiter struct {
135
146
h * hmap
136
147
buckets unsafe.Pointer // bucket ptr at hash_iter initialization time
137
148
bptr * bmap // current bucket
149
+ overflow [2 ]* []* bmap // keeps overflow buckets alive
138
150
startBucket uintptr // bucket iteration started at
139
151
offset uint8 // intra-bucket offset to start from during iteration (should be big enough to hold bucketCnt-1)
140
152
wrapped bool // already wrapped around from end of bucket array to beginning
@@ -152,10 +164,24 @@ func evacuated(b *bmap) bool {
152
164
// overflow returns the bucket chained after b, or nil if there is none.
// The link is stored in the final pointer-sized word of the bucket,
// i.e. at offset t.bucketsize-regSize from the bucket's start.
func (b *bmap) overflow(t *maptype) *bmap {
	link := add(unsafe.Pointer(b), uintptr(t.bucketsize)-regSize)
	return *(**bmap)(link)
}
155
- func (b * bmap ) setoverflow (t * maptype , ovf * bmap ) {
167
+
168
+ func (h * hmap ) setoverflow (t * maptype , b , ovf * bmap ) {
169
+ if t .bucket .kind & kindNoPointers != 0 {
170
+ h .createOverflow ()
171
+ * h .overflow [0 ] = append (* h .overflow [0 ], ovf )
172
+ }
156
173
* (* * bmap )(add (unsafe .Pointer (b ), uintptr (t .bucketsize )- regSize )) = ovf
157
174
}
158
175
176
+ func (h * hmap ) createOverflow () {
177
+ if h .overflow == nil {
178
+ h .overflow = new ([2 ]* []* bmap )
179
+ }
180
+ if h .overflow [0 ] == nil {
181
+ h .overflow [0 ] = new ([]* bmap )
182
+ }
183
+ }
184
+
159
185
func makemap (t * maptype , hint int64 ) * hmap {
160
186
if sz := unsafe .Sizeof (hmap {}); sz > 48 || sz != uintptr (t .hmap .size ) {
161
187
throw ("bad hmap size" )
@@ -463,7 +489,7 @@ again:
463
489
memstats .next_gc = memstats .heap_alloc
464
490
}
465
491
newb := (* bmap )(newobject (t .bucket ))
466
- b .setoverflow (t , newb )
492
+ h .setoverflow (t , b , newb )
467
493
inserti = & newb .tophash [0 ]
468
494
insertk = add (unsafe .Pointer (newb ), dataOffset )
469
495
insertv = add (insertk , bucketCnt * uintptr (t .keysize ))
@@ -548,6 +574,8 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
548
574
it .h = nil
549
575
it .buckets = nil
550
576
it .bptr = nil
577
+ it .overflow [0 ] = nil
578
+ it .overflow [1 ] = nil
551
579
552
580
if raceenabled && h != nil {
553
581
callerpc := getcallerpc (unsafe .Pointer (& t ))
@@ -560,7 +588,7 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
560
588
return
561
589
}
562
590
563
- if unsafe .Sizeof (hiter {})/ ptrSize != 10 {
591
+ if unsafe .Sizeof (hiter {})/ ptrSize != 12 {
564
592
throw ("hash_iter size incorrect" ) // see ../../cmd/gc/reflect.c
565
593
}
566
594
it .t = t
@@ -569,6 +597,14 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
569
597
// grab snapshot of bucket state
570
598
it .B = h .B
571
599
it .buckets = h .buckets
600
+ if t .bucket .kind & kindNoPointers != 0 {
601
+ // Allocate the current slice and remember pointers to both current and old.
602
+ // This preserves all relevant overflow buckets alive even if
603
+ // the table grows and/or overflow buckets are added to the table
604
+ // while we are iterating.
605
+ h .createOverflow ()
606
+ it .overflow = * h .overflow
607
+ }
572
608
573
609
// decide where to start
574
610
r := uintptr (fastrand1 ())
@@ -585,14 +621,8 @@ func mapiterinit(t *maptype, h *hmap, it *hiter) {
585
621
586
622
// Remember we have an iterator.
587
623
// Can run concurrently with another hash_iter_init().
588
- for {
589
- old := h .flags
590
- if old == old | iterator | oldIterator {
591
- break
592
- }
593
- if cas (& h .flags , old , old | iterator | oldIterator ) {
594
- break
595
- }
624
+ if old := h .flags ; old & (iterator | oldIterator ) != iterator | oldIterator {
625
+ atomicor8 (& h .flags , iterator | oldIterator )
596
626
}
597
627
598
628
mapiternext (it )
@@ -753,6 +783,15 @@ func hashGrow(t *maptype, h *hmap) {
753
783
h .buckets = newbuckets
754
784
h .nevacuate = 0
755
785
786
+ if h .overflow != nil {
787
+ // Promote current overflow buckets to the old generation.
788
+ if h .overflow [1 ] != nil {
789
+ throw ("overflow is not nil" )
790
+ }
791
+ h .overflow [1 ] = h .overflow [0 ]
792
+ h .overflow [0 ] = nil
793
+ }
794
+
756
795
// the actual copying of the hash table data is done incrementally
757
796
// by growWork() and evacuate().
758
797
}
@@ -836,7 +875,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
836
875
memstats .next_gc = memstats .heap_alloc
837
876
}
838
877
newx := (* bmap )(newobject (t .bucket ))
839
- x .setoverflow (t , newx )
878
+ h .setoverflow (t , x , newx )
840
879
x = newx
841
880
xi = 0
842
881
xk = add (unsafe .Pointer (x ), dataOffset )
@@ -863,7 +902,7 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
863
902
memstats .next_gc = memstats .heap_alloc
864
903
}
865
904
newy := (* bmap )(newobject (t .bucket ))
866
- y .setoverflow (t , newy )
905
+ h .setoverflow (t , y , newy )
867
906
y = newy
868
907
yi = 0
869
908
yk = add (unsafe .Pointer (y ), dataOffset )
@@ -899,6 +938,12 @@ func evacuate(t *maptype, h *hmap, oldbucket uintptr) {
899
938
if oldbucket + 1 == newbit { // newbit == # of oldbuckets
900
939
// Growing is all done. Free old main bucket array.
901
940
h .oldbuckets = nil
941
+ // Can discard old overflow buckets as well.
942
+ // If they are still referenced by an iterator,
943
+ // then the iterator holds a pointers to the slice.
944
+ if h .overflow != nil {
945
+ h .overflow [1 ] = nil
946
+ }
902
947
}
903
948
}
904
949
}
0 commit comments