Skip to content

Commit edd3537

Browse files
randall77 authored and andybons committed
[release-branch.go1.9] cmd/compile: fix mapassign_fast* routines for pointer keys
The signature of the mapassign_fast* routines need to distinguish the
pointerness of their key argument. If the affected routines suspend part
way through, the object pointed to by the key might get garbage collected
because the key is typed as a uint{32,64}.

This is not a problem for mapaccess or mapdelete because the key in those
situations do not live beyond the call involved. If the object referenced
by the key is garbage collected prematurely, the code still works fine.
Even if that object is subsequently reallocated, it can't be written to
the map in time to affect the lookup/delete.

Fixes #22781

Change-Id: I0bbbc5e9883d5ce702faf4e655348be1191ee439
Reviewed-on: https://go-review.googlesource.com/79018
Run-TryBot: Keith Randall <[email protected]>
Reviewed-by: Austin Clements <[email protected]>
Reviewed-by: Martin Möhrmann <[email protected]>
Reviewed-on: https://go-review.googlesource.com/88635
Run-TryBot: Andrew Bonventre <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Keith Randall <[email protected]>
1 parent 0a72be2 commit edd3537

File tree

5 files changed

+232
-8
lines changed

5 files changed

+232
-8
lines changed

src/cmd/compile/internal/gc/builtin.go

Lines changed: 2 additions & 0 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/cmd/compile/internal/gc/builtin/runtime.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -106,7 +106,9 @@ func mapaccess2_faststr(mapType *byte, hmap map[any]any, key any) (val *any, pre
106106
func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
107107
func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
108108
func mapassign_fast32(mapType *byte, hmap map[any]any, key any) (val *any)
109+
func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key any) (val *any)
109110
func mapassign_fast64(mapType *byte, hmap map[any]any, key any) (val *any)
111+
func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key any) (val *any)
110112
func mapassign_faststr(mapType *byte, hmap map[any]any, key any) (val *any)
111113
func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
112114
func mapdelete(mapType *byte, hmap map[any]any, key *any)

src/cmd/compile/internal/gc/walk.go

Lines changed: 23 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -2785,21 +2785,23 @@ func mapfndel(name string, t *types.Type) *Node {
27852785
const (
27862786
mapslow = iota
27872787
mapfast32
2788+
mapfast32ptr
27882789
mapfast64
2790+
mapfast64ptr
27892791
mapfaststr
27902792
nmapfast
27912793
)
27922794

27932795
type mapnames [nmapfast]string
27942796

2795-
func mkmapnames(base string) mapnames {
2796-
return mapnames{base, base + "_fast32", base + "_fast64", base + "_faststr"}
2797+
func mkmapnames(base string, ptr string) mapnames {
2798+
return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
27972799
}
27982800

2799-
var mapaccess1 mapnames = mkmapnames("mapaccess1")
2800-
var mapaccess2 mapnames = mkmapnames("mapaccess2")
2801-
var mapassign mapnames = mkmapnames("mapassign")
2802-
var mapdelete mapnames = mkmapnames("mapdelete")
2801+
var mapaccess1 mapnames = mkmapnames("mapaccess1", "")
2802+
var mapaccess2 mapnames = mkmapnames("mapaccess2", "")
2803+
var mapassign mapnames = mkmapnames("mapassign", "ptr")
2804+
var mapdelete mapnames = mkmapnames("mapdelete", "")
28032805

28042806
func mapfast(t *types.Type) int {
28052807
// Check ../../runtime/hashmap.go:maxValueSize before changing.
@@ -2808,9 +2810,22 @@ func mapfast(t *types.Type) int {
28082810
}
28092811
switch algtype(t.Key()) {
28102812
case AMEM32:
2811-
return mapfast32
2813+
if !t.Key().HasPointer() {
2814+
return mapfast32
2815+
}
2816+
if Widthptr == 4 {
2817+
return mapfast32ptr
2818+
}
2819+
Fatalf("small pointer %v", t.Key())
28122820
case AMEM64:
2813-
return mapfast64
2821+
if !t.Key().HasPointer() {
2822+
return mapfast64
2823+
}
2824+
if Widthptr == 8 {
2825+
return mapfast64ptr
2826+
}
2827+
// Two-word object, at least one of which is a pointer.
2828+
// Use the slow path.
28142829
case ASTRING:
28152830
return mapfaststr
28162831
}

src/runtime/hashmap_fast.go

Lines changed: 176 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -507,6 +507,94 @@ done:
507507
return val
508508
}
509509

510+
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
511+
if h == nil {
512+
panic(plainError("assignment to entry in nil map"))
513+
}
514+
if raceenabled {
515+
callerpc := getcallerpc(unsafe.Pointer(&t))
516+
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
517+
}
518+
if h.flags&hashWriting != 0 {
519+
throw("concurrent map writes")
520+
}
521+
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
522+
523+
// Set hashWriting after calling alg.hash for consistency with mapassign.
524+
h.flags |= hashWriting
525+
526+
if h.buckets == nil {
527+
h.buckets = newarray(t.bucket, 1)
528+
}
529+
530+
again:
531+
bucket := hash & (uintptr(1)<<h.B - 1)
532+
if h.growing() {
533+
growWork(t, h, bucket)
534+
}
535+
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
536+
top := uint8(hash >> (sys.PtrSize*8 - 8))
537+
if top < minTopHash {
538+
top += minTopHash
539+
}
540+
541+
var inserti *uint8
542+
var insertk unsafe.Pointer
543+
var val unsafe.Pointer
544+
for {
545+
for i := uintptr(0); i < bucketCnt; i++ {
546+
if b.tophash[i] != top {
547+
if b.tophash[i] == empty && inserti == nil {
548+
inserti = &b.tophash[i]
549+
insertk = add(unsafe.Pointer(b), dataOffset+i*4)
550+
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
551+
}
552+
continue
553+
}
554+
k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
555+
if k != key {
556+
continue
557+
}
558+
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
559+
goto done
560+
}
561+
ovf := b.overflow(t)
562+
if ovf == nil {
563+
break
564+
}
565+
b = ovf
566+
}
567+
568+
// Did not find mapping for key. Allocate new cell & add entry.
569+
570+
// If we hit the max load factor or we have too many overflow buckets,
571+
// and we're not already in the middle of growing, start growing.
572+
if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
573+
hashGrow(t, h)
574+
goto again // Growing the table invalidates everything, so try again
575+
}
576+
577+
if inserti == nil {
578+
// all current buckets are full, allocate a new one.
579+
newb := h.newoverflow(t, b)
580+
inserti = &newb.tophash[0]
581+
insertk = add(unsafe.Pointer(newb), dataOffset)
582+
val = add(insertk, bucketCnt*4)
583+
}
584+
585+
// store new key/value at insert position
586+
typedmemmove(t.key, insertk, unsafe.Pointer(&key))
587+
*inserti = top
588+
h.count++
589+
590+
done:
591+
if h.flags&hashWriting == 0 {
592+
throw("concurrent map writes")
593+
}
594+
h.flags &^= hashWriting
595+
return val
596+
}
597+
510598
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
511599
if h == nil {
512600
panic(plainError("assignment to entry in nil map"))
@@ -595,6 +683,94 @@ done:
595683
return val
596684
}
597685

686+
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
687+
if h == nil {
688+
panic(plainError("assignment to entry in nil map"))
689+
}
690+
if raceenabled {
691+
callerpc := getcallerpc(unsafe.Pointer(&t))
692+
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
693+
}
694+
if h.flags&hashWriting != 0 {
695+
throw("concurrent map writes")
696+
}
697+
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
698+
699+
// Set hashWriting after calling alg.hash for consistency with mapassign.
700+
h.flags |= hashWriting
701+
702+
if h.buckets == nil {
703+
h.buckets = newarray(t.bucket, 1)
704+
}
705+
706+
again:
707+
bucket := hash & (uintptr(1)<<h.B - 1)
708+
if h.growing() {
709+
growWork(t, h, bucket)
710+
}
711+
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
712+
top := uint8(hash >> (sys.PtrSize*8 - 8))
713+
if top < minTopHash {
714+
top += minTopHash
715+
}
716+
717+
var inserti *uint8
718+
var insertk unsafe.Pointer
719+
var val unsafe.Pointer
720+
for {
721+
for i := uintptr(0); i < bucketCnt; i++ {
722+
if b.tophash[i] != top {
723+
if b.tophash[i] == empty && inserti == nil {
724+
inserti = &b.tophash[i]
725+
insertk = add(unsafe.Pointer(b), dataOffset+i*8)
726+
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
727+
}
728+
continue
729+
}
730+
k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
731+
if k != key {
732+
continue
733+
}
734+
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
735+
goto done
736+
}
737+
ovf := b.overflow(t)
738+
if ovf == nil {
739+
break
740+
}
741+
b = ovf
742+
}
743+
744+
// Did not find mapping for key. Allocate new cell & add entry.
745+
746+
// If we hit the max load factor or we have too many overflow buckets,
747+
// and we're not already in the middle of growing, start growing.
748+
if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
749+
hashGrow(t, h)
750+
goto again // Growing the table invalidates everything, so try again
751+
}
752+
753+
if inserti == nil {
754+
// all current buckets are full, allocate a new one.
755+
newb := h.newoverflow(t, b)
756+
inserti = &newb.tophash[0]
757+
insertk = add(unsafe.Pointer(newb), dataOffset)
758+
val = add(insertk, bucketCnt*8)
759+
}
760+
761+
// store new key/value at insert position
762+
typedmemmove(t.key, insertk, unsafe.Pointer(&key))
763+
*inserti = top
764+
h.count++
765+
766+
done:
767+
if h.flags&hashWriting == 0 {
768+
throw("concurrent map writes")
769+
}
770+
h.flags &^= hashWriting
771+
return val
772+
}
773+
598774
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
599775
if h == nil {
600776
panic(plainError("assignment to entry in nil map"))

test/fixedbugs/issue22781.go

Lines changed: 29 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,29 @@
1+
// run
2+
3+
// Copyright 2017 The Go Authors. All rights reserved.
4+
// Use of this source code is governed by a BSD-style
5+
// license that can be found in the LICENSE file.
6+
7+
package main
8+
9+
import "runtime/debug"
10+
11+
type T struct {
12+
// >= 16 bytes to avoid tiny alloc.
13+
a, b int
14+
}
15+
16+
func main() {
17+
debug.SetGCPercent(1)
18+
for i := 0; i < 100000; i++ {
19+
m := make(map[*T]struct{}, 0)
20+
for j := 0; j < 20; j++ {
21+
// During the call to mapassign_fast64, the key argument
22+
// was incorrectly treated as a uint64. If the stack was
23+
// scanned during that call, the only pointer to k was
24+
// missed, leading to *k being collected prematurely.
25+
k := new(T)
26+
m[k] = struct{}{}
27+
}
28+
}
29+
}

0 commit comments

Comments (0)