
Commit 07b4abd

all: remove the nacl port (part 2, amd64p32 + toolchain)
This is part two of the nacl removal. Part 1 was CL 199499. This CL removes amd64p32 support, which might be useful in the future if we implement the x32 ABI. It also removes the nacl bits in the toolchain, and some remaining nacl bits.

Updates #30439

Change-Id: I2475d5bb066d1b474e00e40d95b520e7c2e286e1
Reviewed-on: https://go-review.googlesource.com/c/go/+/200077
Reviewed-by: Ian Lance Taylor <[email protected]>
1 parent 19a7490 commit 07b4abd


70 files changed, +387 -4072 lines changed

src/cmd/compile/internal/amd64/ggen.go

+1 -1

@@ -94,7 +94,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog
 		if cnt%16 != 0 {
 			p = pp.Appendpp(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
 		}
-	} else if !gc.Nacl && !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
+	} else if !isPlan9 && (cnt <= int64(128*gc.Widthreg)) {
 		if *state&x0 == 0 {
 			p = pp.Appendpp(p, x86.AXORPS, obj.TYPE_REG, x86.REG_X0, 0, obj.TYPE_REG, x86.REG_X0, 0)
 			*state |= x0
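For context on the branch being changed: zerorange on amd64 picks a zeroing strategy by size, and the !gc.Nacl term was only part of the guard on the Duff's-device branch, which is now disabled only on Plan 9. A standalone sketch of that dispatch follows; it is illustrative only, the names zeroStrategy and widthReg are hypothetical, and only the 128*Widthreg bound is taken from the code above.

package main

import "fmt"

// zeroStrategy mirrors the shape of zerorange's size dispatch above.
// widthReg stands in for gc.Widthreg (8 bytes on amd64); the small-range
// cutoff is illustrative, not the compiler's exact threshold.
func zeroStrategy(cnt int64, isPlan9 bool) string {
	const widthReg = 8
	switch {
	case cnt <= 8*widthReg:
		return "inline MOV/MOVUPS stores"
	case !isPlan9 && cnt <= 128*widthReg: // the branch that used to also require !gc.Nacl
		return "DUFFZERO (Duff's device)"
	default:
		return "REP STOSQ loop"
	}
}

func main() {
	for _, n := range []int64{32, 512, 16384} {
		fmt.Printf("%5d bytes: %s\n", n, zeroStrategy(n, false))
	}
}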

src/cmd/compile/internal/arm/ggen.go

+1 -1

@@ -23,7 +23,7 @@ func zerorange(pp *gc.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog
 		for i := int64(0); i < cnt; i += int64(gc.Widthptr) {
 			p = pp.Appendpp(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
 		}
-	} else if !gc.Nacl && (cnt <= int64(128*gc.Widthptr)) {
+	} else if cnt <= int64(128*gc.Widthptr) {
 		p = pp.Appendpp(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
 		p.Reg = arm.REGSP
 		p = pp.Appendpp(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)

src/cmd/compile/internal/gc/go.go

-2
@@ -247,8 +247,6 @@ var Ctxt *obj.Link
 
 var writearchive bool
 
-var Nacl bool
-
 var nodfp *Node
 
 var disable_checknil int

src/cmd/compile/internal/gc/main.go

-1
@@ -187,7 +187,6 @@ func Main(archInit func(*Arch)) {
 	// pseudo-package used for methods with anonymous receivers
 	gopkg = types.NewPkg("go", "")
 
-	Nacl = objabi.GOOS == "nacl"
 	Wasm := objabi.GOARCH == "wasm"
 
 	// Whether the limit for stack-allocated objects is much smaller than normal.

src/cmd/compile/internal/ssa/config.go

-13
@@ -38,7 +38,6 @@ type Config struct {
 	useSSE    bool // Use SSE for non-float operations
 	useAvg    bool // Use optimizations that need Avg* operations
 	useHmul   bool // Use optimizations that need Hmul* operations
-	nacl      bool // GOOS=nacl
 	use387    bool // GO386=387
 	SoftFloat bool //
 	Race      bool // race detector enabled
@@ -339,7 +338,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
 	}
 	c.ctxt = ctxt
 	c.optimize = optimize
-	c.nacl = objabi.GOOS == "nacl"
 	c.useSSE = true
 
 	// Don't use Duff's device nor SSE on Plan 9 AMD64, because
@@ -349,17 +347,6 @@ func NewConfig(arch string, types Types, ctxt *obj.Link, optimize bool) *Config
 		c.useSSE = false
 	}
 
-	if c.nacl {
-		c.noDuffDevice = true // Don't use Duff's device on NaCl
-
-		// Returns clobber BP on nacl/386, so the write
-		// barrier does.
-		opcodeTable[Op386LoweredWB].reg.clobbers |= 1 << 5 // BP
-
-		// ... and SI on nacl/amd64.
-		opcodeTable[OpAMD64LoweredWB].reg.clobbers |= 1 << 6 // SI
-	}
-
 	if ctxt.Flag_shared {
 		// LoweredWB is secretly a CALL and CALLs on 386 in
 		// shared mode get rewritten by obj6.go to go through
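The deleted block shows how the backend models register clobbers: each opcode's clobber set is a bitmask with one bit per register number, so reserving BP and SI for nacl was a matter of OR-ing in 1<<5 and 1<<6. A minimal self-contained sketch of that representation, using a hypothetical regMask type and the register numbers from the comments above (5 = BP, 6 = SI):

package main

import "fmt"

// regMask is a hypothetical stand-in for the SSA backend's register bitmask:
// bit n set means register number n is in the set (clobbered, allocatable, etc.).
type regMask uint64

// clobbers reports whether register number reg is present in mask m.
func clobbers(m regMask, reg uint) bool { return m&(1<<reg) != 0 }

func main() {
	var wb regMask
	wb |= 1 << 5 // BP, as in the removed nacl/386 adjustment
	wb |= 1 << 6 // SI, as in the removed nacl/amd64 adjustment
	fmt.Println(clobbers(wb, 5), clobbers(wb, 6), clobbers(wb, 0)) // true true false
}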

src/cmd/compile/internal/ssa/gen/AMD64.rules

+41 -41

@@ -596,32 +596,32 @@
 // into tests for carry flags.
 // ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
 // mutandis, for UGE and SETAE, and CC and SETCC.
-((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> ((ULT|UGE) (BTL x y))
-((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> ((ULT|UGE) (BTQ x y))
-((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) -> ((ULT|UGE) (BTL x y))
+((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) -> ((ULT|UGE) (BTQ x y))
+((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c)
     -> ((ULT|UGE) (BTLconst [log2uint32(c)] x))
-((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c)
     -> ((ULT|UGE) (BTQconst [log2(c)] x))
-((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) && !config.nacl
+((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
     -> ((ULT|UGE) (BTQconst [log2(c)] x))
-(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) && !config.nacl -> (SET(B|AE) (BTL x y))
-(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) && !config.nacl -> (SET(B|AE) (BTQ x y))
-(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) -> (SET(B|AE) (BTL x y))
+(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) -> (SET(B|AE) (BTQ x y))
+(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(c)
     -> (SET(B|AE) (BTLconst [log2uint32(c)] x))
-(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(c)
     -> (SET(B|AE) (BTQconst [log2(c)] x))
-(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
     -> (SET(B|AE) (BTQconst [log2(c)] x))
 // SET..store variant
-(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
     -> (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
     -> (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(c)
     -> (SET(B|AE)store [off] {sym} ptr (BTLconst [log2uint32(c)] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(c)
     -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c) && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
     -> (SET(B|AE)store [off] {sym} ptr (BTQconst [log2(c)] x) mem)
 
 // Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
@@ -641,29 +641,29 @@
 (SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) -> (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)
 
 // Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
-(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) && !config.nacl -> (BTS(Q|L) x y)
-(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) && !config.nacl -> (BTC(Q|L) x y)
+(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) -> (BTS(Q|L) x y)
+(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) -> (BTC(Q|L) x y)
 
 // Convert ORconst into BTS, if the code gets smaller, with boundary being
 // (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
-((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Qconst [log2(c)] x)
-((ORL|XORL)const [c] x) && isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORL|XORL)const [c] x) && isUint32PowerOfTwo(c) && uint64(c) >= 128
    -> (BT(S|C)Lconst [log2uint32(c)] x)
-((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Qconst [log2(c)] x)
-((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(c) && uint64(c) >= 128 && !config.nacl
+((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(c) && uint64(c) >= 128
     -> (BT(S|C)Lconst [log2uint32(c)] x)
 
 // Recognize bit clearing: a &^= 1<<b
-(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) && !config.nacl -> (BTR(Q|L) x y)
-(ANDQconst [c] x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) -> (BTR(Q|L) x y)
+(ANDQconst [c] x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRQconst [log2(^c)] x)
-(ANDLconst [c] x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDLconst [c] x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128
    -> (BTRLconst [log2uint32(^c)] x)
-(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRQconst [log2(^c)] x)
-(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128 && !config.nacl
+(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(^c) && uint64(^c) >= 128
     -> (BTRLconst [log2uint32(^c)] x)
 
 // Special-case bit patterns on first/last bit.
@@ -677,40 +677,40 @@
 // We thus special-case them, by detecting the shift patterns.
 
 // Special case resetting first/last bit
-(SHL(L|Q)const [1] (SHR(L|Q)const [1] x)) && !config.nacl
+(SHL(L|Q)const [1] (SHR(L|Q)const [1] x))
     -> (BTR(L|Q)const [0] x)
-(SHRLconst [1] (SHLLconst [1] x)) && !config.nacl
+(SHRLconst [1] (SHLLconst [1] x))
    -> (BTRLconst [31] x)
-(SHRQconst [1] (SHLQconst [1] x)) && !config.nacl
+(SHRQconst [1] (SHLQconst [1] x))
     -> (BTRQconst [63] x)
 
 // Special case testing first/last bit (with double-shift generated by generic.rules)
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
     -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
     -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
 
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
     -> ((SETB|SETAE|ULT|UGE) (BTQconst [0] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTLconst [0] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
    -> (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)
 
 // Special-case manually testing last bit with "a>>63 != 0" (without "&1")
-((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
-((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2 && !config.nacl
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
    -> ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
-(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
   -> (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
-(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2 && !config.nacl
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
   -> (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
 
 // Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
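The guards removed above all protected the single-bit rewrites, so with nacl gone they apply unconditionally on amd64. As a rough source-level illustration (not part of the CL), these are the Go patterns the rules target, with the instruction each right-hand side names:

package main

import "fmt"

// Illustrative only: whether a given build actually emits BT/BTS/BTR/BTC
// still depends on the remaining rule conditions (e.g. isUint64PowerOfTwo
// for the constant forms), but !config.nacl is no longer one of them.
func bitOps(a uint64, b uint) (isSet bool, set, clear, toggle uint64) {
	isSet = a&(1<<b) != 0 // TESTQ (SHLQ (MOVQconst [1]) b) a  ->  BTQ + carry-flag SETcc
	set = a | 1<<b        // ORQ  (SHLQ (MOVQconst [1]) b) a   ->  BTSQ
	clear = a &^ (1 << b) // ANDQ (NOTQ (SHLQ ...)) a          ->  BTRQ
	toggle = a ^ 1<<b     // XORQ (SHLQ (MOVQconst [1]) b) a   ->  BTCQ
	return
}

func main() {
	fmt.Println(bitOps(0b1010, 3))
}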

src/cmd/compile/internal/ssa/gen/ARM.rules

+14 -14

@@ -1246,20 +1246,20 @@
 ((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && objabi.GOARM>=6 -> (REV16 x)
 
 // use indexed loads and stores
-(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVWloadidx ptr idx mem)
-(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVWstoreidx ptr idx val mem)
-(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftLL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRL ptr idx [c] mem)
-(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil && !config.nacl -> (MOVWloadshiftRA ptr idx [c] mem)
-(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftLL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRL ptr idx [c] val mem)
-(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil && !config.nacl -> (MOVWstoreshiftRA ptr idx [c] val mem)
-(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVBUloadidx ptr idx mem)
-(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVBloadidx ptr idx mem)
-(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVBstoreidx ptr idx val mem)
-(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVHUloadidx ptr idx mem)
-(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil && !config.nacl -> (MOVHloadidx ptr idx mem)
-(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil && !config.nacl -> (MOVHstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVWloadidx ptr idx mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVWstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil -> (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil -> (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVBUloadidx ptr idx mem)
+(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVBloadidx ptr idx mem)
+(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVBstoreidx ptr idx val mem)
+(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVHUloadidx ptr idx mem)
+(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil -> (MOVHloadidx ptr idx mem)
+(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil -> (MOVHstoreidx ptr idx val mem)
 
 // constant folding in indexed loads and stores
 (MOVWloadidx ptr (MOVWconst [c]) mem) -> (MOVWload [c] ptr mem)
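These rules fold the pointer-plus-index ADD (possibly with a shifted index) into the load or store itself, and with the !config.nacl term dropped they now fire on every GOOS. A hedged source-level example of the kind of access they target follows; it is illustrative only, with no guarantee about the exact instructions a particular build emits:

package main

import "fmt"

// sum walks a slice of 32-bit words; each xs[i] access computes
// base pointer + scaled index, the shape that the MOVWload/MOVWstore
// indexed-addressing rules above are written to recognize on arm.
func sum(xs []uint32) uint32 {
	var s uint32
	for i := range xs {
		s += xs[i]
	}
	return s
}

func main() {
	fmt.Println(sum([]uint32{1, 2, 3, 4})) // 10
}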

src/cmd/compile/internal/ssa/regalloc.go

-9
@@ -625,15 +625,6 @@ func (s *regAllocState) init(f *Func) {
 			s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch)
 		}
 	}
-	if s.f.Config.nacl {
-		switch s.f.Config.arch {
-		case "arm":
-			s.allocatable &^= 1 << 9 // R9 is "thread pointer" on nacl/arm
-		case "amd64p32":
-			s.allocatable &^= 1 << 5  // BP - reserved for nacl
-			s.allocatable &^= 1 << 15 // R15 - reserved for nacl
-		}
-	}
 	if s.f.Config.use387 {
 		s.allocatable &^= 1 << 15 // X7 disallowed (one 387 register is used as scratch space during SSE->387 generation in ../x86/387.go)
 	}
