@@ -12,7 +12,7 @@ import (
 )

 var ssaRegToReg = []int16{
-	ppc64.REGZERO,
+	// ppc64.REGZERO, // not an SSA reg
 	ppc64.REGSP,
 	ppc64.REG_R2,
 	ppc64.REG_R3,
@@ -44,6 +44,53 @@ var ssaRegToReg = []int16{
 	ppc64.REG_R29,
 	ppc64.REGG,
 	ppc64.REGTMP,
+
+	ppc64.REG_F0,
+	ppc64.REG_F1,
+	ppc64.REG_F2,
+	ppc64.REG_F3,
+	ppc64.REG_F4,
+	ppc64.REG_F5,
+	ppc64.REG_F6,
+	ppc64.REG_F7,
+	ppc64.REG_F8,
+	ppc64.REG_F9,
+	ppc64.REG_F10,
+	ppc64.REG_F11,
+	ppc64.REG_F12,
+	ppc64.REG_F13,
+	ppc64.REG_F14,
+	ppc64.REG_F15,
+	ppc64.REG_F16,
+	ppc64.REG_F17,
+	ppc64.REG_F18,
+	ppc64.REG_F19,
+	ppc64.REG_F20,
+	ppc64.REG_F21,
+	ppc64.REG_F22,
+	ppc64.REG_F23,
+	ppc64.REG_F24,
+	ppc64.REG_F25,
+	ppc64.REG_F26,
+	ppc64.REG_F27,
+	ppc64.REG_F28,
+	ppc64.REG_F29,
+	ppc64.REG_F30,
+	ppc64.REG_F31,
+
+	// ppc64.REG_CR0,
+	// ppc64.REG_CR1,
+	// ppc64.REG_CR2,
+	// ppc64.REG_CR3,
+	// ppc64.REG_CR4,
+	// ppc64.REG_CR5,
+	// ppc64.REG_CR6,
+	// ppc64.REG_CR7,
+
+	ppc64.REG_CR,
+	// ppc64.REG_XER,
+	// ppc64.REG_LR,
+	// ppc64.REG_CTR,
 }

 // Associated condition bit
@@ -98,7 +145,26 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		// input args need no code
 	case ssa.OpSP, ssa.OpSB:
 		// nothing to do
-	case ssa.OpCopy:
+
+	case ssa.OpCopy, ssa.OpPPC64MOVDconvert:
+		// TODO: copy of floats
+		if v.Type.IsMemory() {
+			return
+		}
+		x := gc.SSARegNum(v.Args[0])
+		y := gc.SSARegNum(v)
+		if x != y {
+			p := gc.Prog(ppc64.AMOVD)
+			p.From.Type = obj.TYPE_REG
+			p.From.Reg = x
+			p.To.Reg = y
+			p.To.Type = obj.TYPE_REG
+		}
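+		// (Note: OpPPC64MOVDconvert is the lowered form of the generic
+		// pointer/integer conversion op, which only needs a register move,
+		// so it shares the OpCopy path here.)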
+
+	case ssa.OpPPC64LoweredGetClosurePtr:
+		// Closure pointer is R11 (already)
+		gc.CheckLoweredGetClosurePtr(v)
+
 	case ssa.OpLoadReg:
 		// TODO: by type
 		p := gc.Prog(ppc64.AMOVD)
@@ -138,8 +204,8 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		r2 := gc.SSARegNum(v.Args[1])
 		p := gc.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_REG
-		p.From.Reg = r1
-		p.Reg = r2
+		p.From.Reg = r2
+		p.Reg = r1
 		p.To.Type = obj.TYPE_REG
 		p.To.Reg = r
 	case ssa.OpPPC64NEG:
@@ -195,7 +261,7 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 			v.Fatalf("bad reg %s for symbol type %T, want %s", reg.Name(), v.Aux, wantreg)
 		}

-	case ssa.OpPPC64MOVDconst, ssa.OpPPC64MOVWconst, ssa.OpPPC64MOVHconst, ssa.OpPPC64MOVBconst, ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
+	case ssa.OpPPC64MOVDconst, ssa.OpPPC64MOVWconst, ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
 		p := gc.Prog(v.Op.Asm())
 		p.From.Type = obj.TYPE_CONST
 		p.From.Offset = v.AuxInt
@@ -261,21 +327,79 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 		p.To.Type = obj.TYPE_MEM
 		p.To.Reg = gc.SSARegNum(v.Args[0])
 		gc.AddAux(&p.To, v)
+
 	case ssa.OpPPC64CALLstatic:
-		// TODO: deferreturn
+		if v.Aux.(*gc.Sym) == gc.Deferreturn.Sym {
+			// Deferred calls will appear to be returning to
+			// the CALL deferreturn(SB) that we are about to emit.
+			// However, the stack trace code will show the line
+			// of the instruction byte before the return PC.
+			// To avoid that being an unrelated instruction,
+			// insert two actual hardware NOPs that will have the right line number.
+			// This is different from obj.ANOP, which is a virtual no-op
+			// that doesn't make it into the instruction stream.
+			// PPC64 is unusual because TWO nops are required
+			// (see gc/cgen.go, gc/plive.go)
+			ginsnop()
+			ginsnop()
+		}
 		p := gc.Prog(obj.ACALL)
 		p.To.Type = obj.TYPE_MEM
 		p.To.Name = obj.NAME_EXTERN
 		p.To.Sym = gc.Linksym(v.Aux.(*gc.Sym))
 		if gc.Maxarg < v.AuxInt {
 			gc.Maxarg = v.AuxInt
 		}
+	case ssa.OpPPC64CALLclosure:
+		p := gc.Prog(obj.ACALL)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = gc.SSARegNum(v.Args[0])
+		if gc.Maxarg < v.AuxInt {
+			gc.Maxarg = v.AuxInt
+		}
+	case ssa.OpPPC64CALLdefer:
+		p := gc.Prog(obj.ACALL)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Sym = gc.Linksym(gc.Deferproc.Sym)
+		if gc.Maxarg < v.AuxInt {
+			gc.Maxarg = v.AuxInt
+		}
+	case ssa.OpPPC64CALLgo:
+		p := gc.Prog(obj.ACALL)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Name = obj.NAME_EXTERN
+		p.To.Sym = gc.Linksym(gc.Newproc.Sym)
+		if gc.Maxarg < v.AuxInt {
+			gc.Maxarg = v.AuxInt
+		}
+	case ssa.OpPPC64CALLinter:
+		p := gc.Prog(obj.ACALL)
+		p.To.Type = obj.TYPE_MEM
+		p.To.Reg = gc.SSARegNum(v.Args[0])
+		if gc.Maxarg < v.AuxInt {
+			gc.Maxarg = v.AuxInt
+		}
+
 	case ssa.OpVarDef:
 		gc.Gvardef(v.Aux.(*gc.Node))
 	case ssa.OpVarKill:
 		gc.Gvarkill(v.Aux.(*gc.Node))
 	case ssa.OpVarLive:
 		gc.Gvarlive(v.Aux.(*gc.Node))
+	case ssa.OpKeepAlive:
+		if !v.Args[0].Type.IsPtrShaped() {
+			v.Fatalf("keeping non-pointer alive %v", v.Args[0])
+		}
+		n, off := gc.AutoVar(v.Args[0])
+		if n == nil {
+			v.Fatalf("KeepLive with non-spilled value %s %s", v, v.Args[0])
+		}
+		if off != 0 {
+			v.Fatalf("KeepLive with non-zero offset spill location %s:%d", n, off)
+		}
+		gc.Gvarlive(n)
+
 	case ssa.OpPPC64Equal,
 		ssa.OpPPC64NotEqual,
 		ssa.OpPPC64LessThan,
@@ -295,6 +419,76 @@ func ssaGenValue(s *gc.SSAGenState, v *ssa.Value) {
 				v.Fatalf("phi arg at different location than phi: %v @ %v, but arg %v @ %v\n%s\n", v, loc, a, aloc, v.Block.Func)
 			}
 		}
+
+	case ssa.OpPPC64LoweredNilCheck:
+		// Optimization - if the subsequent block has a load or store
+		// at the same address, we don't need to issue this instruction.
+		// mem := v.Args[1]
+		// for _, w := range v.Block.Succs[0].Block().Values {
+		// 	if w.Op == ssa.OpPhi {
+		// 		if w.Type.IsMemory() {
+		// 			mem = w
+		// 		}
+		// 		continue
+		// 	}
+		// 	if len(w.Args) == 0 || !w.Args[len(w.Args)-1].Type.IsMemory() {
+		// 		// w doesn't use a store - can't be a memory op.
+		// 		continue
+		// 	}
+		// 	if w.Args[len(w.Args)-1] != mem {
+		// 		v.Fatalf("wrong store after nilcheck v=%s w=%s", v, w)
+		// 	}
+		// 	switch w.Op {
+		// 	case ssa.OpARMMOVBload, ssa.OpARMMOVBUload, ssa.OpARMMOVHload, ssa.OpARMMOVHUload,
+		// 		ssa.OpARMMOVWload, ssa.OpARMMOVFload, ssa.OpARMMOVDload,
+		// 		ssa.OpARMMOVBstore, ssa.OpARMMOVHstore, ssa.OpARMMOVWstore,
+		// 		ssa.OpARMMOVFstore, ssa.OpARMMOVDstore:
+		// 		// arg0 is ptr, auxint is offset
+		// 		if w.Args[0] == v.Args[0] && w.Aux == nil && w.AuxInt >= 0 && w.AuxInt < minZeroPage {
+		// 			if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+		// 				gc.Warnl(v.Line, "removed nil check")
+		// 			}
+		// 			return
+		// 		}
+		// 	case ssa.OpARMDUFFZERO, ssa.OpARMLoweredZero, ssa.OpARMLoweredZeroU:
+		// 		// arg0 is ptr
+		// 		if w.Args[0] == v.Args[0] {
+		// 			if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+		// 				gc.Warnl(v.Line, "removed nil check")
+		// 			}
+		// 			return
+		// 		}
+		// 	case ssa.OpARMDUFFCOPY, ssa.OpARMLoweredMove, ssa.OpARMLoweredMoveU:
+		// 		// arg0 is dst ptr, arg1 is src ptr
+		// 		if w.Args[0] == v.Args[0] || w.Args[1] == v.Args[0] {
+		// 			if gc.Debug_checknil != 0 && int(v.Line) > 1 {
+		// 				gc.Warnl(v.Line, "removed nil check")
+		// 			}
+		// 			return
+		// 		}
+		// 	default:
+		// 	}
+		// 	if w.Type.IsMemory() {
+		// 		if w.Op == ssa.OpVarDef || w.Op == ssa.OpVarKill || w.Op == ssa.OpVarLive {
+		// 			// these ops are OK
+		// 			mem = w
+		// 			continue
+		// 		}
+		// 		// We can't delay the nil check past the next store.
+		// 		break
+		// 	}
+		// }
+		// Issue a load which will fault if arg is nil.
+		p := gc.Prog(ppc64.AMOVB)
+		p.From.Type = obj.TYPE_MEM
+		p.From.Reg = gc.SSARegNum(v.Args[0])
+		gc.AddAux(&p.From, v)
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = ppc64.REGTMP
+		if gc.Debug_checknil != 0 && v.Line > 1 { // v.Line==1 in generated wrappers
+			gc.Warnl(v.Line, "generated nil check")
+		}
+
 	default:
 		v.Unimplementedf("genValue not implemented: %s", v.LongString())
 	}
@@ -321,6 +515,26 @@ func ssaGenBlock(s *gc.SSAGenState, b, next *ssa.Block) {
 	s.SetLineno(b.Line)

 	switch b.Kind {
+
+	case ssa.BlockDefer:
+		// defer returns in R3:
+		// 0 if we should continue executing
+		// 1 if we should jump to deferreturn call
+		p := gc.Prog(ppc64.ACMP)
+		p.From.Type = obj.TYPE_REG
+		p.From.Reg = ppc64.REG_R3
+		p.To.Type = obj.TYPE_REG
+		p.To.Reg = ppc64.REG_R0
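+		// (R0 is maintained as zero by the Go ppc64 toolchain — see the
+		// REGZERO note above — so this CMP compares the R3 result against 0.)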
+
+		p = gc.Prog(ppc64.ABNE)
+		p.To.Type = obj.TYPE_BRANCH
+		s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[1].Block()})
+		if b.Succs[0].Block() != next {
+			p := gc.Prog(obj.AJMP)
+			p.To.Type = obj.TYPE_BRANCH
+			s.Branches = append(s.Branches, gc.Branch{P: p, B: b.Succs[0].Block()})
+		}
+
 	case ssa.BlockPlain, ssa.BlockCall, ssa.BlockCheck:
 		if b.Succs[0].Block() != next {
 			p := gc.Prog(obj.AJMP)