@@ -145,8 +145,27 @@ func BenchmarkLoadOrStoreCollision(b *testing.B) {
 	})
 }
 
+func BenchmarkRange(b *testing.B) {
+	const mapSize = 1 << 10
+
+	benchMap(b, bench{
+		setup: func(_ *testing.B, m mapInterface) {
+			for i := 0; i < mapSize; i++ {
+				m.Store(i, i)
+			}
+		},
+
+		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+			for ; pb.Next(); i++ {
+				m.Range(func(_, _ interface{}) bool { return true })
+			}
+		},
+	})
+}
+
 // BenchmarkAdversarialAlloc tests performance when we store a new value
-// immediately whenever the map is promoted to clean.
+// immediately whenever the map is promoted to clean and otherwise load a
+// unique, missing key.
 //
 // This forces the Load calls to always acquire the map's mutex.
 func BenchmarkAdversarialAlloc(b *testing.B) {
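(The benchmarks in this diff all go through the file's shared harness. For reference, here is a minimal sketch of what that harness plausibly looks like; only the benchMap, bench, setup, perG, and mapInterface names are confirmed by the diff, and the method set and implementation details below are assumptions.)

// Sketch of the shared benchmark harness (assumed, not part of the diff).
package sync_test

import (
	"fmt"
	"reflect"
	"sync"
	"sync/atomic"
	"testing"
)

// mapInterface is the common surface the benchmarks exercise; the exact
// method set here is an assumption.
type mapInterface interface {
	Load(key interface{}) (value interface{}, ok bool)
	Store(key, value interface{})
	LoadOrStore(key, value interface{}) (actual interface{}, loaded bool)
	Delete(key interface{})
	Range(f func(key, value interface{}) (shouldContinue bool))
}

type bench struct {
	setup func(*testing.B, mapInterface)
	perG  func(b *testing.B, pb *testing.PB, i int, m mapInterface)
}

func benchMap(b *testing.B, bench bench) {
	// The real file presumably compares several implementations; sync.Map
	// alone keeps this sketch self-contained.
	for _, m := range [...]mapInterface{&sync.Map{}} {
		b.Run(fmt.Sprintf("%T", m), func(b *testing.B) {
			// Allocate a fresh map per sub-benchmark so setup state does not leak.
			m = reflect.New(reflect.TypeOf(m).Elem()).Interface().(mapInterface)
			if bench.setup != nil {
				bench.setup(b, m)
			}
			b.ResetTimer()

			var i int64
			b.RunParallel(func(pb *testing.PB) {
				id := int(atomic.AddInt64(&i, 1) - 1)
				bench.perG(b, pb, id*b.N, m)
			})
		})
	}
}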
@@ -165,8 +184,8 @@ func BenchmarkAdversarialAlloc(b *testing.B) {
 	})
 }
 
-// BenchmarkAdversarialDelete tests performance when we delete and restore a
-// value immediately after a large map has been promoted.
+// BenchmarkAdversarialDelete tests performance when we periodically delete
+// one key and add a different one in a large map.
 //
 // This forces the Load calls to always acquire the map's mutex and periodically
 // makes a full copy of the map despite changing only one entry.
@@ -191,7 +210,7 @@ func BenchmarkAdversarialDelete(b *testing.B) {
 						return false
 					})
 					m.Delete(key)
-					m.Store(key, key)
+					m.Store(i, i)
 				}
 			}
 		},
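For context, after this change the function plausibly reads as the sketch below. The hunk above confirms only the Range/Delete/Store lines; the m.Load call and the i%mapSize guard are reconstructed from the benchmark's doc comment and are assumptions.

// Reconstructed sketch of BenchmarkAdversarialDelete after the fix; only the
// lines shown in the hunk above are confirmed by the diff.
func BenchmarkAdversarialDelete(b *testing.B) {
	const mapSize = 1 << 10

	benchMap(b, bench{
		setup: func(_ *testing.B, m mapInterface) {
			for i := 0; i < mapSize; i++ {
				m.Store(i, i)
			}
		},

		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
			for ; pb.Next(); i++ {
				m.Load(i)

				if i%mapSize == 0 {
					// Delete one arbitrary key and add a different one, so a
					// single entry changes but the map must still be recopied
					// on the next promotion.
					var key int
					m.Range(func(k, _ interface{}) bool {
						key = k.(int)
						return false
					})
					m.Delete(key)
					m.Store(i, i)
				}
			}
		},
	})
}

Storing i rather than re-storing key is what makes the added key distinct from the deleted one, matching the updated doc comment.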
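As a usage note, the new benchmark can be run on its own with the standard tooling (command shown as an example, not taken from the diff):

	go test -run=NONE -bench=BenchmarkRange sync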