Skip to content

Commit 30a82ef

Browse files
committed
cmd/compile, runtime: track argument stack slot liveness
Currently, for stack traces (e.g. at panic or when runtime.Stack is called), we print argument values from the stack. With register ABI, we may never store the argument to stack therefore the argument value on stack may be meaningless. This causes confusion. This CL makes the compiler keep track of which argument stack slots are meaningful. If it is meaningful, it will be printed in stack traces as before. If it may not be meaningful, it will be printed as the stack value with a question mark ("?"). In general, the value could be meaningful on some code paths but not others depending on the execution, and the compiler couldn't know statically, so we still print the stack value, instead of not printing it at all. Also note that if the argument variable is updated in the function body the printed value may be stale (like before register ABI) but still considered meaningful. Arguments passed on stack are always meaningful therefore always printed without a question mark. Results are never printed, as before. (Due to a bug in the compiler we sometimes don't spill args into their dedicated spill slots (as we should), causing it to have fewer meaningful values than it should.) This increases binary sizes a bit: old new hello 1129760 1142080 +1.09% cmd/go 13932320 14088016 +1.12% cmd/link 6267696 6329168 +0.98% Fixes #45728. Change-Id: I308a0402e5c5ab94ca0953f8bd85a56acd28f58c Reviewed-on: https://go-review.googlesource.com/c/go/+/352057 Trust: Cherry Mui <[email protected]> Reviewed-by: Michael Knyszek <[email protected]>
1 parent bbc0595 commit 30a82ef

File tree

15 files changed

+538
-14
lines changed

15 files changed

+538
-14
lines changed

src/cmd/compile/internal/bitvec/bv.go

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -128,10 +128,21 @@ func (bv BitVec) IsEmpty() bool {
128128
return true
129129
}
130130

131+
func (bv BitVec) Count() int {
132+
n := 0
133+
for _, x := range bv.B {
134+
n += bits.OnesCount32(x)
135+
}
136+
return n
137+
}
138+
131139
// Not inverts bv in place. After flipping every word, it clears any
// bits beyond the logical length N in the last word, so the vector
// stays well-formed (no stray set bits past N that would confuse
// callers like Count or Eq).
func (bv BitVec) Not() {
	for i, x := range bv.B {
		bv.B[i] = ^x
	}
	if bv.N%wordBits != 0 {
		bv.B[len(bv.B)-1] &= 1<<uint(bv.N%wordBits) - 1 // clear bits past N in the last word
	}
}
136147

137148
// union

src/cmd/compile/internal/gc/obj.go

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -249,8 +249,7 @@ func addGCLocals() {
249249
}
250250
}
251251
if x := fn.StackObjects; x != nil {
252-
attr := int16(obj.RODATA)
253-
objw.Global(x, int32(len(x.P)), attr)
252+
objw.Global(x, int32(len(x.P)), obj.RODATA)
254253
x.Set(obj.AttrStatic, true)
255254
}
256255
if x := fn.OpenCodedDeferInfo; x != nil {
@@ -260,6 +259,10 @@ func addGCLocals() {
260259
objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
261260
x.Set(obj.AttrStatic, true)
262261
}
262+
if x := fn.ArgLiveInfo; x != nil {
263+
objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
264+
x.Set(obj.AttrStatic, true)
265+
}
263266
}
264267
}
265268

Lines changed: 339 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,339 @@
1+
// Copyright 2021 The Go Authors. All rights reserved.
2+
// Use of this source code is governed by a BSD-style
3+
// license that can be found in the LICENSE file.
4+
5+
package liveness
6+
7+
import (
8+
"fmt"
9+
10+
"cmd/compile/internal/base"
11+
"cmd/compile/internal/bitvec"
12+
"cmd/compile/internal/ir"
13+
"cmd/compile/internal/objw"
14+
"cmd/compile/internal/ssa"
15+
"cmd/internal/obj"
16+
"cmd/internal/objabi"
17+
)
18+
19+
// Argument liveness tracking.
20+
//
21+
// For arguments passed in registers, this file tracks if their spill slots
22+
// are live for runtime traceback. An argument spill slot is live at a PC
23+
// if we know that an actual value has been stored into it at or before this point.
24+
//
25+
// Stack args are always live and not tracked in this code. Stack args are
26+
// laid out before register spill slots, so we emit the smallest offset that
27+
// needs tracking. Slots before that offset are always live. That offset is
28+
// usually the offset of the first spill slot. But if the first spill slot is
29+
// always live (e.g. if it is address-taken), it will be the offset of a later
30+
// one.
31+
//
32+
// The liveness information is emitted as a FUNCDATA and a PCDATA.
33+
//
34+
// FUNCDATA format:
35+
// - start (smallest) offset that needs tracking (1 byte)
36+
// - a list of bitmaps.
37+
// In a bitmap bit i is set if the i-th spill slot is live.
38+
//
39+
// At a PC where the liveness info changes, a PCDATA indicates the
40+
// byte offset of the liveness map in the FUNCDATA. PCDATA -1 is a
41+
// special case indicating all slots are live (for binary size
42+
// saving).
43+
44+
// allLiveIdx is the special liveness map index meaning "all tracked
// spill slots are live"; it is emitted as PCDATA -1 to save space
// instead of pointing at an all-ones bitmap.
const allLiveIdx = -1

// nameOff identifies one argument spill slot: a byte offset off within
// the argument variable n.
type nameOff struct {
	n   *ir.Name
	off int64
}

// FrameOffset returns the slot's stack frame offset (the variable's
// frame offset plus the slot's offset within the variable).
func (a nameOff) FrameOffset() int64 { return a.n.FrameOffset() + a.off }

// String formats the slot as "name+off" for debug output.
func (a nameOff) String() string { return fmt.Sprintf("%v+%d", a.n, a.off) }
54+
55+
// blockArgEffects records, for one basic block, which argument spill
// slots are live at block entry and at block exit.
type blockArgEffects struct {
	livein  bitvec.BitVec // variables live at block entry
	liveout bitvec.BitVec // variables live at block exit
}
59+
60+
// argLiveness holds the per-function state for the argument spill slot
// liveness computation (see ArgLiveness).
type argLiveness struct {
	fn   *ir.Func
	f    *ssa.Func
	args []nameOff         // name and offset of spill slots
	idx  map[nameOff]int32 // index in args

	be []blockArgEffects // indexed by block ID

	bvset bvecSet // Set of liveness bitmaps, used for uniquifying.

	// Liveness map indices at each Value (where it changes) and Block entry.
	// During the computation the indices are temporarily index to bvset.
	// At the end they will be index (offset) to the output funcdata (changed
	// in (*argLiveness).emit).
	blockIdx map[ssa.ID]int
	valueIdx map[ssa.ID]int
}
77+
78+
// ArgLiveness computes the liveness information of register argument spill slots.
// An argument's spill slot is "live" if we know it contains a meaningful value,
// that is, we have stored the register value to it.
// Returns the liveness map indices at each Block entry and at each Value (where
// it changes). The indices returned are byte offsets into the emitted
// funcdata (or allLiveIdx), after emit rewrites them.
func ArgLiveness(fn *ir.Func, f *ssa.Func, pp *objw.Progs) (blockIdx, valueIdx map[ssa.ID]int) {
	if f.OwnAux.ABIInfo().InRegistersUsed() == 0 || base.Flag.N != 0 {
		// No register args. Nothing to emit.
		// Or if -N is used we spill everything upfront so it is always live.
		return nil, nil
	}

	lv := &argLiveness{
		fn:       fn,
		f:        f,
		idx:      make(map[nameOff]int32),
		be:       make([]blockArgEffects, f.NumBlocks()),
		blockIdx: make(map[ssa.ID]int),
		valueIdx: make(map[ssa.ID]int),
	}
	// Gather all register arg spill slots.
	for _, a := range f.OwnAux.ABIInfo().InParams() {
		n, ok := a.Name.(*ir.Name)
		if !ok || len(a.Registers) == 0 {
			continue
		}
		_, offs := a.RegisterTypesAndOffsets()
		for _, off := range offs {
			if n.FrameOffset()+off > 0xff {
				// We only print a limited number of args, with stack
				// offsets no larger than 255.
				continue
			}
			lv.args = append(lv.args, nameOff{n, off})
		}
	}
	if len(lv.args) > 10 {
		lv.args = lv.args[:10] // We print no more than 10 args.
	}

	// We spill address-taken or non-SSA-able value upfront, so they are always live.
	alwaysLive := func(n *ir.Name) bool { return n.Addrtaken() || !f.Frontend().CanSSA(n.Type()) }

	// We'll emit the smallest offset for the slots that need liveness info.
	// No need to include a slot with a lower offset if it is always live.
	for len(lv.args) > 0 && alwaysLive(lv.args[0].n) {
		lv.args = lv.args[1:]
	}
	if len(lv.args) == 0 {
		return // everything is always live
	}

	for i, a := range lv.args {
		lv.idx[a] = int32(i)
	}

	// Allocate livein/liveout bitvectors for all blocks in one bulk
	// allocation (two per block).
	nargs := int32(len(lv.args))
	bulk := bitvec.NewBulk(nargs, int32(len(f.Blocks)*2))
	for _, b := range f.Blocks {
		be := &lv.be[b.ID]
		be.livein = bulk.Next()
		be.liveout = bulk.Next()

		// initialize to all 1s, so we can AND them
		be.livein.Not()
		be.liveout.Not()
	}

	// The entry block's livein is only the slots that are always live;
	// nothing has been spilled yet at function entry.
	entrybe := &lv.be[f.Entry.ID]
	entrybe.livein.Clear()
	for i, a := range lv.args {
		if alwaysLive(a.n) {
			entrybe.livein.Set(int32(i))
		}
	}

	// Visit blocks in reverse-postorder, compute block effects.
	po := f.Postorder()
	for i := len(po) - 1; i >= 0; i-- {
		b := po[i]
		be := &lv.be[b.ID]

		// A slot is live at block entry if it is live in all predecessors.
		for _, pred := range b.Preds {
			pb := pred.Block()
			be.livein.And(be.livein, lv.be[pb.ID].liveout)
		}

		// liveout = livein plus the effect of each store in the block.
		be.liveout.Copy(be.livein)
		for _, v := range b.Values {
			lv.valueEffect(v, be.liveout)
		}
	}
	// NOTE(review): this is a single reverse-postorder pass, not an
	// iterate-to-fixpoint dataflow loop — presumably adequate because
	// bits only ever get set (monotonic); confirm for loops with back
	// edges.

	// Coalesce identical live vectors. Compute liveness indices at each PC
	// where it changes.
	live := bitvec.New(nargs)
	addToSet := func(bv bitvec.BitVec) (int, bool) {
		if bv.Count() == int(nargs) { // special case for all live
			return allLiveIdx, false
		}
		return lv.bvset.add(bv)
	}
	for _, b := range lv.f.Blocks {
		be := &lv.be[b.ID]
		lv.blockIdx[b.ID], _ = addToSet(be.livein)

		live.Copy(be.livein)
		var lastv *ssa.Value
		for i, v := range b.Values {
			if lv.valueEffect(v, live) {
				// Record that liveness changes but not emit a map now.
				// For a sequence of StoreRegs we only need to emit one
				// at last.
				lastv = v
			}
			if lastv != nil && (mayFault(v) || i == len(b.Values)-1) {
				// Emit the liveness map if it may fault or at the end of
				// the block. We may need a traceback if the instruction
				// may cause a panic.
				var added bool
				lv.valueIdx[lastv.ID], added = addToSet(live)
				if added {
					// live is added to bvset and we cannot modify it now.
					// Make a copy.
					t := live
					live = bitvec.New(nargs)
					live.Copy(t)
				}
				lastv = nil
			}
		}

		// Sanity check.
		if !live.Eq(be.liveout) {
			panic("wrong arg liveness map at block end")
		}
	}

	// Emit funcdata symbol, update indices to offsets in the symbol data.
	lsym := lv.emit()
	fn.LSym.Func().ArgLiveInfo = lsym

	//lv.print()

	// Attach the funcdata to the function via an AFUNCDATA pseudo-op.
	p := pp.Prog(obj.AFUNCDATA)
	p.From.SetConst(objabi.FUNCDATA_ArgLiveInfo)
	p.To.Type = obj.TYPE_MEM
	p.To.Name = obj.NAME_EXTERN
	p.To.Sym = lsym

	return lv.blockIdx, lv.valueIdx
}
231+
232+
// valueEffect applies the effect of v to live, return whether it is changed.
// Only a spill (ssa.OpStoreReg) of a register argument (PPARAM) to a
// tracked slot has an effect: it sets the slot's bit. Bits are only
// ever set, never cleared — once a slot is live it stays live.
func (lv *argLiveness) valueEffect(v *ssa.Value, live bitvec.BitVec) bool {
	if v.Op != ssa.OpStoreReg { // TODO: include other store instructions?
		return false
	}
	n, off := ssa.AutoVar(v)
	if n.Class != ir.PPARAM {
		return false
	}
	i, ok := lv.idx[nameOff{n, off}]
	if !ok || live.Get(i) {
		return false // not a tracked slot, or already live: no change
	}
	live.Set(i)
	return true
}
248+
249+
func mayFault(v *ssa.Value) bool {
250+
switch v.Op {
251+
case ssa.OpLoadReg, ssa.OpStoreReg, ssa.OpCopy, ssa.OpPhi,
252+
ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive,
253+
ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult,
254+
ssa.OpConvert, ssa.OpInlMark, ssa.OpGetG:
255+
return false
256+
}
257+
if len(v.Args) == 0 {
258+
return false // assume constant op cannot fault
259+
}
260+
return true // conservatively assume all other ops could fault
261+
}
262+
263+
// print dumps the computed argument liveness (per-block livein/liveout,
// per-value changes, and the final funcdata bytes) to stdout for
// debugging. It is normally left commented out at the call site in
// ArgLiveness.
//
// NOTE(review): live is allocated once and never reset to be.livein at
// the top of each block, so bits set in earlier blocks carry over into
// later blocks' per-value dump — confirm this is intended for this
// debug output.
func (lv *argLiveness) print() {
	fmt.Println("argument liveness:", lv.f.Name)
	live := bitvec.New(int32(len(lv.args)))
	for _, b := range lv.f.Blocks {
		be := &lv.be[b.ID]

		fmt.Printf("%v: live in: ", b)
		lv.printLivenessVec(be.livein)
		if idx, ok := lv.blockIdx[b.ID]; ok {
			fmt.Printf(" #%d", idx)
		}
		fmt.Println()

		for _, v := range b.Values {
			if lv.valueEffect(v, live) {
				fmt.Printf(" %v: ", v)
				lv.printLivenessVec(live)
				if idx, ok := lv.valueIdx[v.ID]; ok {
					fmt.Printf(" #%d", idx)
				}
				fmt.Println()
			}
		}

		fmt.Printf("%v: live out: ", b)
		lv.printLivenessVec(be.liveout)
		fmt.Println()
	}
	fmt.Println("liveness maps data:", lv.fn.LSym.Func().ArgLiveInfo.P)
}
293+
294+
func (lv *argLiveness) printLivenessVec(bv bitvec.BitVec) {
295+
for i, a := range lv.args {
296+
if bv.Get(int32(i)) {
297+
fmt.Printf("%v ", a)
298+
}
299+
}
300+
}
301+
302+
// emit serializes the liveness information into a content-addressable
// funcdata symbol and returns it. The format (see the file header
// comment) is one byte — the smallest frame offset that needs tracking
// — followed by the unique liveness bitmaps. As a side effect, emit
// rewrites lv.blockIdx and lv.valueIdx from bvset indices to byte
// offsets within the symbol's data.
func (lv *argLiveness) emit() *obj.LSym {
	livenessMaps := lv.bvset.extractUnique()

	// stack offsets of register arg spill slots
	argOffsets := make([]uint8, len(lv.args))
	for i, a := range lv.args {
		off := a.FrameOffset()
		if off > 0xff {
			// Slots with offsets > 255 were filtered out in
			// ArgLiveness, so this indicates an internal error.
			panic("offset too large")
		}
		argOffsets[i] = uint8(off)
	}

	// Map from bvset index to byte offset of that bitmap in the symbol.
	idx2off := make([]int, len(livenessMaps))

	lsym := base.Ctxt.Lookup(lv.fn.LSym.Name + ".argliveinfo")
	lsym.Set(obj.AttrContentAddressable, true)

	// Only the first (smallest) offset is written out; slots before it
	// are always live, per the funcdata format.
	off := objw.Uint8(lsym, 0, argOffsets[0]) // smallest offset that needs liveness info.
	for idx, live := range livenessMaps {
		idx2off[idx] = off
		off = objw.BitVec(lsym, off, live)
	}

	// Update liveness indices to offsets.
	for i, x := range lv.blockIdx {
		if x != allLiveIdx {
			lv.blockIdx[i] = idx2off[x]
		}
	}
	for i, x := range lv.valueIdx {
		if x != allLiveIdx {
			lv.valueIdx[i] = idx2off[x]
		}
	}

	return lsym
}

0 commit comments

Comments
 (0)