
Commit 4e2b84f

compile: prefer an AND instead of SHR+SHL instructions
On modern 64-bit CPUs a SHR, SHL or AND instruction takes 1 cycle to execute.
A pair of shifts that operate on the same register takes 2 cycles and needs
to wait for the input register value to be available.

Large constants used to mask the high bits of a register with an AND
instruction cannot be encoded as an immediate in the AND instruction on
amd64 and therefore need to be loaded into a register with a MOV
instruction. However, that MOV instruction is not dependent on the output
register and on many CPUs does not compete with the AND or shift
instructions for execution ports.

Using a pair of shifts instead of an AND to mask the high bits of a register
has a shorter encoding and uses one less general-purpose register, but is
slower because it takes one clock cycle longer, provided there is no register
pressure that would force the AND variant to generate a spill.

For example, the instructions emitted for (x & 1 << 63) before this CL are:

48c1ea3f                SHRQ $0x3f, DX
48c1e23f                SHLQ $0x3f, DX

After this CL the instructions are the same as GCC and LLVM use:

48b80000000000000080    MOVQ $0x8000000000000000, AX
4821d0                  ANDQ DX, AX

Some platforms such as arm64 already have SSA optimization rules to fuse
two shift instructions back into an AND.

Removing the general rule that rewrites AND to SHR+SHL speeds up this
benchmark:

var GlobalU uint

func BenchmarkAndHighBits(b *testing.B) {
	x := uint(0)
	for i := 0; i < b.N; i++ {
		x &= 1 << 63
	}
	GlobalU = x
}

amd64/darwin on Intel(R) Core(TM) i7-3520M CPU @ 2.90GHz:

name           old time/op  new time/op  delta
AndHighBits-4  0.61ns ± 6%  0.42ns ± 6%  -31.42%  (p=0.000 n=25+25)

'go run run.go -all_codegen -v codegen' passes with the following adjustments:

ARM64: The BFXIL pattern ((x << lc) >> rc | y & ac) needed adjustment since
ORshiftRL generation fusing '>> rc' and '|' interferes with matching
((x << lc) >> rc) to generate UBFX. Previously ORshiftLL was created first,
using the shifts generated for (y & ac).

S390X: Add rules for abs and copysign to match the use of AND instead of
SHIFTs.

Updates #33826
Updates #32781

Change-Id: I43227da76b625de03fbc51117162b23b9c678cdb
Reviewed-on: https://go-review.googlesource.com/c/go/+/194297
Run-TryBot: Martin Möhrmann <[email protected]>
TryBot-Result: Gobot Gobot <[email protected]>
Reviewed-by: Cherry Zhang <[email protected]>
1 parent ecc7dd5 commit 4e2b84f
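
To reproduce the measurement locally, the benchmark from the commit message can be dropped into a standalone test file. The file and package names below are illustrative, and the exact instructions emitted depend on the Go version and GOARCH:

// andhighbits_test.go (hypothetical file name and package)
package andhighbits

import "testing"

var GlobalU uint

// The loop body x &= 1 << 63 is the pattern this CL changes: per the commit
// message, amd64 previously emitted SHRQ/SHLQ for it and now emits a MOVQ of
// the mask constant followed by an ANDQ.
func BenchmarkAndHighBits(b *testing.B) {
	x := uint(0)
	for i := 0; i < b.N; i++ {
		x &= 1 << 63
	}
	GlobalU = x
}

Running something like 'go test -bench=AndHighBits -run=^$ -gcflags=-S' times the loop and also prints the generated assembly during compilation, so the SHRQ/SHLQ versus MOVQ/ANDQ difference can be inspected directly.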

File tree

7 files changed (+625 / -293 lines)


src/cmd/compile/internal/ssa/gen/ARM64.rules

Lines changed: 4 additions & 3 deletions
@@ -1863,9 +1863,8 @@
 (XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
 	-> (EXTRWconst [32-c] x2 x)
 
-// Generic rules rewrite certain AND to a pair of shifts.
-// However, on ARM64 the bitmask can fit into an instruction.
-// Rewrite it back to AND.
+// Rewrite special pairs of shifts to AND.
+// On ARM64 the bitmask can fit into an instruction.
 (SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
 (SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 -> (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
 
@@ -1971,6 +1970,8 @@
 	-> (BFXIL [bfc] y x)
 (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == getARM64BFwidth(bfc)
 	-> (BFXIL [bfc] y x)
+(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
+	-> (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
 
 // do combined loads
 // little endian loads
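
The two ANDconst rules above encode the usual shift/mask identities: shifting left then logically right by c clears the high c bits, the same as ANDing with 1<<(64-c)-1, while shifting right then left by c clears the low c bits, the same as ANDing with ^(1<<c-1). A small standalone Go check of those identities (ordinary Go, not part of the compiler sources):

package main

import "fmt"

func main() {
	x := uint64(0xDEADBEEFCAFEF00D)
	for c := uint(1); c < 64; c++ {
		// (SRLconst [c] (SLLconst [c] x)) vs. (ANDconst [1<<(64-c)-1] x): mask out high bits
		if x<<c>>c != x&(1<<(64-c)-1) {
			fmt.Println("high-bit mask mismatch at c =", c)
			return
		}
		// (SLLconst [c] (SRLconst [c] x)) vs. (ANDconst [^(1<<c-1)] x): mask out low bits
		if x>>c<<c != x&^(1<<c-1) {
			fmt.Println("low-bit mask mismatch at c =", c)
			return
		}
	}
	fmt.Println("shift pairs and AND masks agree for all c in 1..63")
}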

src/cmd/compile/internal/ssa/gen/S390X.rules

Lines changed: 4 additions & 0 deletions
@@ -701,6 +701,8 @@
 // may need to be reworked when NIHH/OIHH are added
 (SRDconst [1] (SLDconst [1] (LGDR <t> x))) -> (LGDR <t> (LPDFR <x.Type> x))
 (LDGR <t> (SRDconst [1] (SLDconst [1] x))) -> (LPDFR (LDGR <t> x))
+(AND (MOVDconst [^(-1<<63)]) (LGDR <t> x)) -> (LGDR <t> (LPDFR <x.Type> x))
+(LDGR <t> (AND (MOVDconst [^(-1<<63)]) x)) -> (LPDFR (LDGR <t> x))
 (OR (MOVDconst [-1<<63]) (LGDR <t> x)) -> (LGDR <t> (LNDFR <x.Type> x))
 (LDGR <t> (OR (MOVDconst [-1<<63]) x)) -> (LNDFR (LDGR <t> x))
 
@@ -710,6 +712,8 @@
 // detect copysign
 (OR (SLDconst [63] (SRDconst [63] (LGDR x))) (LGDR (LPDFR <t> y))) -> (LGDR (CPSDR <t> y x))
 (OR (SLDconst [63] (SRDconst [63] (LGDR x))) (MOVDconst [c])) && c & -1<<63 == 0 -> (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [c]) x))
+(OR (AND (MOVDconst [-1<<63]) (LGDR x)) (LGDR (LPDFR <t> y))) -> (LGDR (CPSDR <t> y x))
+(OR (AND (MOVDconst [-1<<63]) (LGDR x)) (MOVDconst [c])) && c & -1<<63 == 0 -> (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [c]) x))
 (CPSDR y (FMOVDconst [c])) && c & -1<<63 == 0 -> (LPDFR y)
 (CPSDR y (FMOVDconst [c])) && c & -1<<63 != 0 -> (LNDFR y)
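
The new S390X rules recognize abs and copysign once the input arrives as an AND rather than a shift pair: on binary64 floats, abs is clearing bit 63 of the raw representation and copysign is splicing the sign bit of one value onto the magnitude bits of another, which is why an AND with ^(-1<<63) (every bit except the sign bit) or -1<<63 (only the sign bit) around the GPR<->FPR moves (LGDR/LDGR) is equivalent to the old shift patterns. A standalone sketch of those bit-level identities (ordinary Go, not compiler code):

package main

import (
	"fmt"
	"math"
)

const signBit = uint64(1) << 63 // the bit that -1<<63 and ^(-1<<63) single out

// absBits clears the sign bit, the operation the abs rules detect.
func absBits(x float64) float64 {
	return math.Float64frombits(math.Float64bits(x) &^ signBit)
}

// copysignBits keeps the magnitude bits of x and the sign bit of y,
// the operation the "detect copysign" rules look for.
func copysignBits(x, y float64) float64 {
	return math.Float64frombits(math.Float64bits(x)&^signBit | math.Float64bits(y)&signBit)
}

func main() {
	fmt.Println(absBits(-1.5) == math.Abs(-1.5))                     // true
	fmt.Println(copysignBits(3.0, -2.0) == math.Copysign(3.0, -2.0)) // true
}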

src/cmd/compile/internal/ssa/gen/generic.rules

Lines changed: 0 additions & 8 deletions
@@ -542,14 +542,6 @@
 (Slicemask (Const64 [x])) && x > 0 -> (Const64 [-1])
 (Slicemask (Const64 [0])) -> (Const64 [0])
 
-// Rewrite AND of consts as shifts if possible, slightly faster for 64 bit operands
-// leading zeros can be shifted left, then right
-(And64 <t> (Const64 [y]) x) && nlz(y) + nto(y) == 64 && nto(y) >= 32
-	-> (Rsh64Ux64 (Lsh64x64 <t> x (Const64 <t> [nlz(y)])) (Const64 <t> [nlz(y)]))
-// trailing zeros can be shifted right, then left
-(And64 <t> (Const64 [y]) x) && nlo(y) + ntz(y) == 64 && ntz(y) >= 32
-	-> (Lsh64x64 (Rsh64Ux64 <t> x (Const64 <t> [ntz(y)])) (Const64 <t> [ntz(y)]))
-
 // simplifications often used for lengths. e.g. len(s[i:i+5])==5
 (Sub(64|32|16|8) (Add(64|32|16|8) x y) x) -> y
 (Sub(64|32|16|8) (Add(64|32|16|8) x y) y) -> x
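
The deleted rules only fired for constants whose binary form is a run of zeros followed by a run of ones (nlz(y)+nto(y) == 64, a low mask) or a run of ones followed by a run of zeros (nlo(y)+ntz(y) == 64, a high mask); only for such masks can the AND be replaced by a shift pair. A standalone check of the two equivalences (nlz/nto/nlo/ntz are the rule file's helpers; the math/bits calls below are just the closest standard-library equivalents):

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	x := uint64(0x0123456789ABCDEF)

	// Low mask: 32 leading zeros then 32 ones, so nlz(y)+nto(y) == 64.
	y := uint64(0x00000000FFFFFFFF)
	nlz := uint(bits.LeadingZeros64(y)) // nto(y) would be bits.TrailingZeros64(^y) == 32
	fmt.Println(x&y == x<<nlz>>nlz)     // the removed Rsh64Ux64(Lsh64x64 ...) form

	// High mask: 32 leading ones then 32 zeros, so nlo(z)+ntz(z) == 64.
	z := uint64(0xFFFFFFFF00000000)
	ntz := uint(bits.TrailingZeros64(z)) // nlo(z) would be bits.LeadingZeros64(^z) == 32
	fmt.Println(x&z == x>>ntz<<ntz)      // the removed Lsh64x64(Rsh64Ux64 ...) form
}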

src/cmd/compile/internal/ssa/rewriteARM64.go

Lines changed: 27 additions & 0 deletions
Some generated files are not rendered by default.
