Commit 48e207d

cmd/compile: fix mapassign_fast* routines for pointer keys
The signatures of the mapassign_fast* routines need to distinguish
the pointerness of their key argument. If one of the affected routines
suspends partway through, the object pointed to by the key might get
garbage collected, because the key is typed as a uint{32,64}.

This is not a problem for mapaccess or mapdelete, because the key in
those situations does not live beyond the call involved. If the object
referenced by the key is garbage collected prematurely, the code still
works fine. Even if that object is subsequently reallocated, it can't
be written to the map in time to affect the lookup/delete.

Fixes #22781

Change-Id: I0bbbc5e9883d5ce702faf4e655348be1191ee439
Reviewed-on: https://go-review.googlesource.com/79018
Run-TryBot: Keith Randall <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
Reviewed-by: Martin Möhrmann <[email protected]>
1 parent a2a1c17 commit 48e207d
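For context, a minimal sketch of the failure mode described above (hypothetical code, not part of the commit). The freshly allocated key's only reference is the argument passed to the runtime's fast assign routine, so a stack scan that types that slot as a uint64 can miss it:

	// Hypothetical illustration; not code from this commit.
	// On 64-bit platforms the compiler lowers the assignment below to a
	// mapassign_fast64 call (mapassign_fast64ptr after this fix).
	package main

	type T struct{ a, b int }

	func main() {
		m := make(map[*T]struct{})
		// new(T) yields the only pointer to the allocation. Before the
		// fix, a GC stack scan during the assign call saw the key slot
		// as a uint64 and could collect the object mid-insert.
		m[new(T)] = struct{}{}
		_ = len(m)
	}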

File tree

5 files changed, +227 −21 lines changed

src/cmd/compile/internal/gc/builtin.go

Lines changed: 2 additions & 0 deletions
Generated file; diff not rendered by default.

src/cmd/compile/internal/gc/builtin/runtime.go

Lines changed: 2 additions & 0 deletions
@@ -109,7 +109,9 @@ func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pres bool)
 func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
 func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
 func mapassign_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key any) (val *any)
 func mapassign_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
+func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key any) (val *any)
 func mapassign_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
 func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
 func mapdelete(mapType *byte, hmap map[any]any, key *any)
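These declarations make the new runtime entry points visible to the compiler; the identifier "any" here is the compiler's stand-in for the concrete key type. As a hedged sketch (not from the commit), the assign builtin the compiler would select for a few key types, assuming a 64-bit platform:

	var m1 map[uint32]int // m1[k] = v -> mapassign_fast32
	var m2 map[uint64]int // m2[k] = v -> mapassign_fast64
	var m3 map[*int]int   // m3[k] = v -> mapassign_fast64ptr (pointer key)
	var m4 map[string]int // m4[k] = v -> mapassign_faststr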

src/cmd/compile/internal/gc/walk.go

Lines changed: 23 additions & 8 deletions
@@ -2826,21 +2826,23 @@ func mapfndel(name string, t *types.Type) *Node {
 const (
 	mapslow = iota
 	mapfast32
+	mapfast32ptr
 	mapfast64
+	mapfast64ptr
 	mapfaststr
 	nmapfast
 )
 
 type mapnames [nmapfast]string
 
-func mkmapnames(base string) mapnames {
-	return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"}
+func mkmapnames(base string, ptr string) mapnames {
+	return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
 }
 
-var mapaccess1 = mkmapnames("mapaccess1")
-var mapaccess2 = mkmapnames("mapaccess2")
-var mapassign = mkmapnames("mapassign")
-var mapdelete = mkmapnames("mapdelete")
+var mapaccess1 = mkmapnames("mapaccess1", "")
+var mapaccess2 = mkmapnames("mapaccess2", "")
+var mapassign = mkmapnames("mapassign", "ptr")
+var mapdelete = mkmapnames("mapdelete", "")
 
 func mapfast(t *types.Type) int {
 	// Check ../../runtime/hashmap.go:maxValueSize before changing.
@@ -2849,9 +2851,22 @@ func mapfast(t *types.Type) int {
 	}
 	switch algtype(t.Key()) {
 	case AMEM32:
-		return mapfast32
+		if !t.Key().HasHeapPointer() {
+			return mapfast32
+		}
+		if Widthptr == 4 {
+			return mapfast32ptr
+		}
+		Fatalf("small pointer %v", t.Key())
 	case AMEM64:
-		return mapfast64
+		if !t.Key().HasHeapPointer() {
+			return mapfast64
+		}
+		if Widthptr == 8 {
+			return mapfast64ptr
+		}
+		// Two-word object, at least one of which is a pointer.
+		// Use the slow path.
 	case ASTRING:
 		return mapfaststr
 	}
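A hedged reading of the selection logic above (inferred from the switch, not text from the commit):

	// map[uint32]V -> mapfast32 on all platforms
	// map[uint64]V -> mapfast64 on all platforms
	// map[*T]V     -> mapfast64ptr on 64-bit, mapfast32ptr on 32-bit
	// map[string]V -> mapfaststr
	//
	// An 8-byte key that merely contains a pointer, e.g. struct{ p *T; n int32 }
	// on a 32-bit platform, is a two-word object and falls through to the slow
	// path; a 4-byte key that is a pointer cannot occur on a 64-bit platform,
	// hence the Fatalf("small pointer") guard.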

src/runtime/hashmap_fast.go

Lines changed: 171 additions & 13 deletions
@@ -420,11 +420,93 @@ again:
 
 	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
 	// store new key at insert position
-	if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
-		writebarrierptr((*uintptr)(insertk), uintptr(key))
-	} else {
-		*(*uint32)(insertk) = key
+	*(*uint32)(insertk) = key
+
+	h.count++
+
+done:
+	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
+
+func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc()
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
 	}
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & bucketMask(h.B)
+	if h.growing() {
+		growWork_fast32(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+
+	var insertb *bmap
+	var inserti uintptr
+	var insertk unsafe.Pointer
+
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] == empty {
+				if insertb == nil {
+					inserti = i
+					insertb = b
+				}
+				continue
+			}
+			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
+			if k != key {
+				continue
+			}
+			inserti = i
+			insertb = b
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if insertb == nil {
+		// all current buckets are full, allocate a new one.
+		insertb = h.newoverflow(t, b)
+		inserti = 0 // not necessary, but avoids needlessly spilling inserti
+	}
+	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+	// store new key at insert position
+	*(*unsafe.Pointer)(insertk) = key
 
 	h.count++
 
@@ -510,18 +592,94 @@ again:
 
 	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
 	// store new key at insert position
-	if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
-		if sys.PtrSize == 8 {
-			writebarrierptr((*uintptr)(insertk), uintptr(key))
-		} else {
-			// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
-			// Give up and call typedmemmove.
-			typedmemmove(t.key, insertk, unsafe.Pointer(&key))
+	*(*uint64)(insertk) = key
+
+	h.count++
+
+done:
+	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
+
+func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc()
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
+	}
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & bucketMask(h.B)
+	if h.growing() {
+		growWork_fast64(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+
+	var insertb *bmap
+	var inserti uintptr
+	var insertk unsafe.Pointer
+
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] == empty {
+				if insertb == nil {
+					insertb = b
+					inserti = i
+				}
+				continue
+			}
+			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
+			if k != key {
+				continue
+			}
+			insertb = b
+			inserti = i
+			goto done
 		}
-	} else {
-		*(*uint64)(insertk) = key
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
 	}
 
+	if insertb == nil {
+		// all current buckets are full, allocate a new one.
+		insertb = h.newoverflow(t, b)
+		inserti = 0 // not necessary, but avoids needlessly spilling inserti
+	}
+	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
+	// store new key at insert position
+	*(*unsafe.Pointer)(insertk) = key
+
 	h.count++
 
 done:
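Why the rewrite works, as a hedged reading of the diff: typing the key parameter as unsafe.Pointer rather than uint32/uint64 makes the function's stack map record that argument slot as a pointer, so a GC scan during the call keeps the referent alive; and the plain pointer store receives the compiler-inserted write barrier that the deleted writebarrierptr/typedmemmove branches used to provide by hand. The signature contrast (the fast64 signature is from the runtime source of this era, not shown in this diff):

	func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer
	func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer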

test/fixedbugs/issue22781.go

Lines changed: 29 additions & 0 deletions
@@ -0,0 +1,29 @@
+// run
+
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "runtime/debug"
+
+type T struct {
+	// >= 16 bytes to avoid tiny alloc.
+	a, b int
+}
+
+func main() {
+	debug.SetGCPercent(1)
+	for i := 0; i < 100000; i++ {
+		m := make(map[*T]struct{}, 0)
+		for j := 0; j < 20; j++ {
+			// During the call to mapassign_fast64, the key argument
+			// was incorrectly treated as a uint64. If the stack was
+			// scanned during that call, the only pointer to k was
+			// missed, leading to *k being collected prematurely.
+			k := new(T)
+			m[k] = struct{}{}
+		}
+	}
+}
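A note on the reproducer: the // run header tells the compiler's fixedbugs test harness to build and execute the file, and it can also be run directly (hedged: on a toolchain without this fix, it crashes or corrupts *k with some probability once the GC, made aggressive by debug.SetGCPercent(1), scans a stack mid-assign):

	$ go run test/fixedbugs/issue22781.go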
