Skip to content

Commit 73a196a

Browse files
committed
Recommit "[AArch64] Split bitmask immediate of bitwise AND operation"
This reverts the revert commit f85d8a5 with bug fixes. Original message: MOVi32imm + ANDWrr ==> ANDWri + ANDWri MOVi64imm + ANDXrr ==> ANDXri + ANDXri The mov pseudo instruction could be expanded to multiple mov instructions later. In this case, try to split the constant operand of mov instruction into two bitmask immediates. It makes only two AND instructions instead of multiple mov + and instructions. Added a peephole optimization pass on MIR level to implement it. Differential Revision: https://reviews.llvm.org/D109963
1 parent f701505 commit 73a196a

8 files changed

+498
-4
lines changed

llvm/lib/Target/AArch64/AArch64.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,7 @@ FunctionPass *createAArch64A53Fix835769();
5151
FunctionPass *createFalkorHWPFFixPass();
5252
FunctionPass *createFalkorMarkStridedAccessesPass();
5353
FunctionPass *createAArch64BranchTargetsPass();
54+
FunctionPass *createAArch64MIPeepholeOptPass();
5455

5556
FunctionPass *createAArch64CleanupLocalDynamicTLSPass();
5657

@@ -82,6 +83,7 @@ void initializeAArch64SLSHardeningPass(PassRegistry&);
8283
void initializeAArch64SpeculationHardeningPass(PassRegistry&);
8384
void initializeAArch64LoadStoreOptPass(PassRegistry&);
8485
void initializeAArch64LowerHomogeneousPrologEpilogPass(PassRegistry &);
86+
void initializeAArch64MIPeepholeOptPass(PassRegistry &);
8587
void initializeAArch64SIMDInstrOptPass(PassRegistry&);
8688
void initializeAArch64O0PreLegalizerCombinerPass(PassRegistry &);
8789
void initializeAArch64PreLegalizerCombinerPass(PassRegistry&);
Lines changed: 235 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,235 @@
1+
//===- AArch64MIPeepholeOpt.cpp - AArch64 MI peephole optimization pass ---===//
2+
//
3+
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4+
// See https://llvm.org/LICENSE.txt for license information.
5+
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6+
//
7+
//===----------------------------------------------------------------------===//
8+
//
9+
// This pass performs below peephole optimizations on MIR level.
10+
//
11+
// 1. MOVi32imm + ANDWrr ==> ANDWri + ANDWri
12+
// MOVi64imm + ANDXrr ==> ANDXri + ANDXri
13+
//
14+
// The mov pseudo instruction could be expanded to multiple mov instructions
15+
// later. In this case, we could try to split the constant operand of mov
16+
// instruction into two bitmask immediates. It makes two AND instructions
17+
// instead of multiple `mov` + `and` instructions.
18+
//===----------------------------------------------------------------------===//
19+
20+
#include "AArch64ExpandImm.h"
21+
#include "AArch64InstrInfo.h"
22+
#include "MCTargetDesc/AArch64AddressingModes.h"
23+
#include "llvm/ADT/SetVector.h"
24+
#include "llvm/CodeGen/MachineDominators.h"
25+
#include "llvm/CodeGen/MachineLoopInfo.h"
26+
27+
using namespace llvm;
28+
29+
#define DEBUG_TYPE "aarch64-mi-peephole-opt"
30+
31+
namespace {
32+
33+
struct AArch64MIPeepholeOpt : public MachineFunctionPass {
34+
static char ID;
35+
36+
AArch64MIPeepholeOpt() : MachineFunctionPass(ID) {
37+
initializeAArch64MIPeepholeOptPass(*PassRegistry::getPassRegistry());
38+
}
39+
40+
const AArch64InstrInfo *TII;
41+
MachineLoopInfo *MLI;
42+
MachineRegisterInfo *MRI;
43+
44+
template <typename T>
45+
bool visitAND(MachineInstr &MI,
46+
SmallSetVector<MachineInstr *, 8> &ToBeRemoved);
47+
bool runOnMachineFunction(MachineFunction &MF) override;
48+
49+
StringRef getPassName() const override {
50+
return "AArch64 MI Peephole Optimization pass";
51+
}
52+
53+
void getAnalysisUsage(AnalysisUsage &AU) const override {
54+
AU.setPreservesCFG();
55+
AU.addRequired<MachineLoopInfo>();
56+
MachineFunctionPass::getAnalysisUsage(AU);
57+
}
58+
};
59+
60+
char AArch64MIPeepholeOpt::ID = 0;
61+
62+
} // end anonymous namespace
63+
64+
INITIALIZE_PASS(AArch64MIPeepholeOpt, "aarch64-mi-peephole-opt",
65+
"AArch64 MI Peephole Optimization", false, false)
66+
67+
template <typename T>
68+
static bool splitBitmaskImm(T Imm, unsigned RegSize, T &Imm1Enc, T &Imm2Enc) {
69+
T UImm = static_cast<T>(Imm);
70+
if (AArch64_AM::isLogicalImmediate(UImm, RegSize))
71+
return false;
72+
73+
// If this immediate can be handled by one instruction, do not split it.
74+
SmallVector<AArch64_IMM::ImmInsnModel, 4> Insn;
75+
AArch64_IMM::expandMOVImm(UImm, RegSize, Insn);
76+
if (Insn.size() == 1)
77+
return false;
78+
79+
// The bitmask immediate consists of consecutive ones. Let's say there is
80+
// constant 0b00000000001000000000010000000000 which does not consist of
81+
// consecutive ones. We can split it in to two bitmask immediate like
82+
// 0b00000000001111111111110000000000 and 0b11111111111000000000011111111111.
83+
// If we do AND with these two bitmask immediate, we can see original one.
84+
unsigned LowestBitSet = countTrailingZeros(UImm);
85+
unsigned HighestBitSet = Log2_64(UImm);
86+
87+
// Create a mask which is filled with one from the position of lowest bit set
88+
// to the position of highest bit set.
89+
T NewImm1 = (static_cast<T>(2) << HighestBitSet) -
90+
(static_cast<T>(1) << LowestBitSet);
91+
// Create a mask which is filled with one outside the position of lowest bit
92+
// set and the position of highest bit set.
93+
T NewImm2 = UImm | ~NewImm1;
94+
95+
// If the splitted value is not valid bitmask immediate, do not split this
96+
// constant.
97+
if (!AArch64_AM::isLogicalImmediate(NewImm2, RegSize))
98+
return false;
99+
100+
Imm1Enc = AArch64_AM::encodeLogicalImmediate(NewImm1, RegSize);
101+
Imm2Enc = AArch64_AM::encodeLogicalImmediate(NewImm2, RegSize);
102+
return true;
103+
}
104+
105+
template <typename T>
106+
bool AArch64MIPeepholeOpt::visitAND(
107+
MachineInstr &MI, SmallSetVector<MachineInstr *, 8> &ToBeRemoved) {
108+
// Try below transformation.
109+
//
110+
// MOVi32imm + ANDWrr ==> ANDWri + ANDWri
111+
// MOVi64imm + ANDXrr ==> ANDXri + ANDXri
112+
//
113+
// The mov pseudo instruction could be expanded to multiple mov instructions
114+
// later. Let's try to split the constant operand of mov instruction into two
115+
// bitmask immediates. It makes only two AND instructions intead of multiple
116+
// mov + and instructions.
117+
118+
unsigned RegSize = sizeof(T) * 8;
119+
assert((RegSize == 32 || RegSize == 64) &&
120+
"Invalid RegSize for AND bitmask peephole optimization");
121+
122+
// Check whether AND's MBB is in loop and the AND is loop invariant.
123+
MachineBasicBlock *MBB = MI.getParent();
124+
MachineLoop *L = MLI->getLoopFor(MBB);
125+
if (L && !L->isLoopInvariant(MI))
126+
return false;
127+
128+
// Check whether AND's operand is MOV with immediate.
129+
MachineInstr *MovMI = MRI->getUniqueVRegDef(MI.getOperand(2).getReg());
130+
MachineInstr *SubregToRegMI = nullptr;
131+
// If it is SUBREG_TO_REG, check its operand.
132+
if (MovMI->getOpcode() == TargetOpcode::SUBREG_TO_REG) {
133+
SubregToRegMI = MovMI;
134+
MovMI = MRI->getUniqueVRegDef(MovMI->getOperand(2).getReg());
135+
}
136+
137+
// If the MOV has multiple uses, do not split the immediate because it causes
138+
// more instructions.
139+
if (!MRI->hasOneUse(MovMI->getOperand(0).getReg()))
140+
return false;
141+
142+
if (MovMI->getOpcode() != AArch64::MOVi32imm &&
143+
MovMI->getOpcode() != AArch64::MOVi64imm)
144+
return false;
145+
146+
// Split the bitmask immediate into two.
147+
T UImm = static_cast<T>(MovMI->getOperand(1).getImm());
148+
T Imm1Enc;
149+
T Imm2Enc;
150+
if (!splitBitmaskImm(UImm, RegSize, Imm1Enc, Imm2Enc))
151+
return false;
152+
153+
// Create new AND MIs.
154+
DebugLoc DL = MI.getDebugLoc();
155+
Register DstReg = MI.getOperand(0).getReg();
156+
Register SrcReg = MI.getOperand(1).getReg();
157+
Register NewTmpReg1 = MRI->createVirtualRegister(
158+
(RegSize == 32) ? &AArch64::GPR32spRegClass : &AArch64::GPR64spRegClass);
159+
Register NewTmpReg2 = MRI->createVirtualRegister(MRI->getRegClass(SrcReg));
160+
Register NewTmpReg3 = MRI->createVirtualRegister(
161+
(RegSize == 32) ? &AArch64::GPR32spRegClass : &AArch64::GPR64spRegClass);
162+
unsigned Opcode = (RegSize == 32) ? AArch64::ANDWri : AArch64::ANDXri;
163+
164+
// COPY MIs are generated to align register classes as below.
165+
//
166+
// %1:gpr32 = MOVi32imm 2098176
167+
// %2:gpr32common = ANDWrr %0:gpr32, killed %1:gpr32
168+
//==>
169+
// %5:gpr32sp = ANDWri %0:gpr32, 1419
170+
// %6:gpr32 = COPY %5:gpr32sp
171+
// %7:gpr32sp = ANDWri %6:gpr32, 725
172+
// %2:gpr32common = COPY %7:gpr32sp
173+
174+
BuildMI(*MBB, MI, DL, TII->get(Opcode), NewTmpReg1)
175+
.addReg(SrcReg)
176+
.addImm(Imm1Enc);
177+
178+
// Copy from GPRsp to GPR.
179+
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY), NewTmpReg2)
180+
.addReg(NewTmpReg1);
181+
182+
BuildMI(*MBB, MI, DL, TII->get(Opcode), NewTmpReg3)
183+
.addReg(NewTmpReg2)
184+
.addImm(Imm2Enc);
185+
186+
// Copy from GPR to GPRsp.
187+
BuildMI(*MBB, MI, DL, TII->get(TargetOpcode::COPY), DstReg)
188+
.addReg(NewTmpReg3);
189+
190+
ToBeRemoved.insert(&MI);
191+
if (SubregToRegMI)
192+
ToBeRemoved.insert(SubregToRegMI);
193+
ToBeRemoved.insert(MovMI);
194+
195+
return true;
196+
}
197+
198+
bool AArch64MIPeepholeOpt::runOnMachineFunction(MachineFunction &MF) {
  if (skipFunction(MF.getFunction()))
    return false;

  TII = static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
  MLI = &getAnalysis<MachineLoopInfo>();
  MRI = &MF.getRegInfo();

  // The transformation walks def/use chains of virtual registers, so it only
  // works while the function is still in SSA form.
  if (!MRI->isSSA())
    return false;

  bool Changed = false;
  SmallSetVector<MachineInstr *, 8> ToBeRemoved;

  for (MachineBasicBlock &MBB : MF) {
    for (MachineInstr &MI : MBB) {
      switch (MI.getOpcode()) {
      default:
        break;
      case AArch64::ANDWrr:
        // Accumulate with |=: plain assignment (as originally written) let a
        // later unsuccessful visit clobber an earlier successful one, making
        // the pass report "no change" after modifying the function.
        Changed |= visitAND<uint32_t>(MI, ToBeRemoved);
        break;
      case AArch64::ANDXrr:
        Changed |= visitAND<uint64_t>(MI, ToBeRemoved);
        break;
      }
    }
  }

  // Erase dead instructions only after iteration is complete.
  for (MachineInstr *MI : ToBeRemoved)
    MI->eraseFromParent();

  return Changed;
}
232+
233+
/// Factory hook used by the AArch64 pass pipeline (AArch64TargetMachine) to
/// instantiate this pass.
FunctionPass *llvm::createAArch64MIPeepholeOptPass() {
  return new AArch64MIPeepholeOpt();
}

llvm/lib/Target/AArch64/AArch64TargetMachine.cpp

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -195,6 +195,7 @@ extern "C" LLVM_EXTERNAL_VISIBILITY void LLVMInitializeAArch64Target() {
195195
initializeAArch64DeadRegisterDefinitionsPass(*PR);
196196
initializeAArch64ExpandPseudoPass(*PR);
197197
initializeAArch64LoadStoreOptPass(*PR);
198+
initializeAArch64MIPeepholeOptPass(*PR);
198199
initializeAArch64SIMDInstrOptPass(*PR);
199200
initializeAArch64O0PreLegalizerCombinerPass(*PR);
200201
initializeAArch64PreLegalizerCombinerPass(*PR);
@@ -479,6 +480,7 @@ class AArch64PassConfig : public TargetPassConfig {
479480
bool addRegBankSelect() override;
480481
void addPreGlobalInstructionSelect() override;
481482
bool addGlobalInstructionSelect() override;
483+
void addMachineSSAOptimization() override;
482484
bool addILPOpts() override;
483485
void addPreRegAlloc() override;
484486
void addPostRegAlloc() override;
@@ -649,6 +651,14 @@ bool AArch64PassConfig::addGlobalInstructionSelect() {
649651
return false;
650652
}
651653

654+
void AArch64PassConfig::addMachineSSAOptimization() {
655+
// Run default MachineSSAOptimization first.
656+
TargetPassConfig::addMachineSSAOptimization();
657+
658+
if (TM->getOptLevel() != CodeGenOpt::None)
659+
addPass(createAArch64MIPeepholeOptPass());
660+
}
661+
652662
bool AArch64PassConfig::addILPOpts() {
653663
if (EnableCondOpt)
654664
addPass(createAArch64ConditionOptimizerPass());

llvm/lib/Target/AArch64/CMakeLists.txt

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,7 @@ add_llvm_target(AArch64CodeGen
6666
AArch64LowerHomogeneousPrologEpilog.cpp
6767
AArch64MachineFunctionInfo.cpp
6868
AArch64MacroFusion.cpp
69+
AArch64MIPeepholeOpt.cpp
6970
AArch64MCInstLower.cpp
7071
AArch64PromoteConstant.cpp
7172
AArch64PBQPRegAlloc.cpp

llvm/lib/Target/AArch64/MCTargetDesc/AArch64AddressingModes.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -13,6 +13,7 @@
1313
#ifndef LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
1414
#define LLVM_LIB_TARGET_AARCH64_MCTARGETDESC_AARCH64ADDRESSINGMODES_H
1515

16+
#include "AArch64ExpandImm.h"
1617
#include "llvm/ADT/APFloat.h"
1718
#include "llvm/ADT/APInt.h"
1819
#include "llvm/ADT/bit.h"

llvm/test/CodeGen/AArch64/O3-pipeline.ll

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -40,7 +40,7 @@
4040
; CHECK-NEXT: Induction Variable Users
4141
; CHECK-NEXT: Loop Strength Reduction
4242
; CHECK-NEXT: Basic Alias Analysis (stateless AA impl)
43-
; CHECK-NEXT: Function Alias Analysis Results
43+
; CHECK-NEXT: Function Alias Analysis Results
4444
; CHECK-NEXT: Merge contiguous icmps into a memcmp
4545
; CHECK-NEXT: Natural Loop Information
4646
; CHECK-NEXT: Lazy Branch Probability Analysis
@@ -131,6 +131,7 @@
131131
; CHECK-NEXT: Machine code sinking
132132
; CHECK-NEXT: Peephole Optimizations
133133
; CHECK-NEXT: Remove dead machine instructions
134+
; CHECK-NEXT: AArch64 MI Peephole Optimization pass
134135
; CHECK-NEXT: AArch64 Dead register definitions
135136
; CHECK-NEXT: Detect Dead Lanes
136137
; CHECK-NEXT: Process Implicit Definitions

0 commit comments

Comments
 (0)