Skip to content

Commit 3564c85

Browse files
authored
[RISCV] Eliminate dead li after emitting VSETVLIs (#65934)
This patch tracks the `li` (load-immediate) instructions that define AVL operands and performs dead-code elimination on them after emitting VSETVLIs, removing any that are left unused.
1 parent b81c694 commit 3564c85

File tree

2 files changed

+11
-24
lines changed

2 files changed

+11
-24
lines changed

llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1291,9 +1291,20 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
12911291
if (RISCVII::hasVLOp(TSFlags)) {
12921292
MachineOperand &VLOp = MI.getOperand(getVLOpNum(MI));
12931293
if (VLOp.isReg()) {
1294+
Register Reg = VLOp.getReg();
1295+
MachineInstr *VLOpDef = MRI->getVRegDef(Reg);
1296+
12941297
// Erase the AVL operand from the instruction.
12951298
VLOp.setReg(RISCV::NoRegister);
12961299
VLOp.setIsKill(false);
1300+
1301+
// If the AVL was an immediate > 31, then it would have been emitted
1302+
// as an ADDI. However, the ADDI might not have been used in the
1303+
// vsetvli, or a vsetvli might not have been emitted, so it may be
1304+
// dead now.
1305+
if (VLOpDef && TII->isAddImmediate(*VLOpDef, Reg) &&
1306+
MRI->use_nodbg_empty(Reg))
1307+
VLOpDef->eraseFromParent();
12971308
}
12981309
MI.addOperand(MachineOperand::CreateReg(RISCV::VL, /*isDef*/ false,
12991310
/*isImp*/ true));

llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll

Lines changed: 0 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -12432,7 +12432,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1243212432
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
1243312433
; RV64ZVE32F-NEXT: add a2, a0, a2
1243412434
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12435-
; RV64ZVE32F-NEXT: li a3, 32
1243612435
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1243712436
; RV64ZVE32F-NEXT: vsetivli zero, 2, e8, m1, tu, ma
1243812437
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 1
@@ -12470,7 +12469,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1247012469
; RV64ZVE32F-NEXT: vmv.x.s a2, v14
1247112470
; RV64ZVE32F-NEXT: add a2, a0, a2
1247212471
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12473-
; RV64ZVE32F-NEXT: li a3, 32
1247412472
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
1247512473
; RV64ZVE32F-NEXT: vsetivli zero, 6, e8, m1, tu, ma
1247612474
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 5
@@ -12494,7 +12492,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1249412492
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
1249512493
; RV64ZVE32F-NEXT: add a2, a0, a2
1249612494
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12497-
; RV64ZVE32F-NEXT: li a3, 32
1249812495
; RV64ZVE32F-NEXT: vmv.s.x v13, a2
1249912496
; RV64ZVE32F-NEXT: vsetivli zero, 10, e8, m1, tu, ma
1250012497
; RV64ZVE32F-NEXT: vslideup.vi v10, v13, 9
@@ -12509,7 +12506,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1250912506
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
1251012507
; RV64ZVE32F-NEXT: add a2, a0, a2
1251112508
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12512-
; RV64ZVE32F-NEXT: li a3, 32
1251312509
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
1251412510
; RV64ZVE32F-NEXT: vsetivli zero, 11, e8, m1, tu, ma
1251512511
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 10
@@ -12522,7 +12518,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1252212518
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
1252312519
; RV64ZVE32F-NEXT: add a2, a0, a2
1252412520
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12525-
; RV64ZVE32F-NEXT: li a3, 32
1252612521
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1252712522
; RV64ZVE32F-NEXT: vsetivli zero, 12, e8, m1, tu, ma
1252812523
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 11
@@ -12535,7 +12530,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1253512530
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
1253612531
; RV64ZVE32F-NEXT: add a2, a0, a2
1253712532
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12538-
; RV64ZVE32F-NEXT: li a3, 32
1253912533
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
1254012534
; RV64ZVE32F-NEXT: vsetivli zero, 13, e8, m1, tu, ma
1254112535
; RV64ZVE32F-NEXT: vslideup.vi v10, v9, 12
@@ -12548,7 +12542,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1254812542
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
1254912543
; RV64ZVE32F-NEXT: add a2, a0, a2
1255012544
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12551-
; RV64ZVE32F-NEXT: li a3, 32
1255212545
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
1255312546
; RV64ZVE32F-NEXT: vsetivli zero, 14, e8, m1, tu, ma
1255412547
; RV64ZVE32F-NEXT: vslideup.vi v10, v9, 13
@@ -12572,7 +12565,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1257212565
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
1257312566
; RV64ZVE32F-NEXT: add a2, a0, a2
1257412567
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12575-
; RV64ZVE32F-NEXT: li a3, 32
1257612568
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1257712569
; RV64ZVE32F-NEXT: vsetivli zero, 18, e8, m2, tu, ma
1257812570
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 17
@@ -12610,7 +12602,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1261012602
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
1261112603
; RV64ZVE32F-NEXT: add a2, a0, a2
1261212604
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12613-
; RV64ZVE32F-NEXT: li a3, 32
1261412605
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1261512606
; RV64ZVE32F-NEXT: vsetivli zero, 22, e8, m2, tu, ma
1261612607
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 21
@@ -12634,7 +12625,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1263412625
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
1263512626
; RV64ZVE32F-NEXT: add a2, a0, a2
1263612627
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12637-
; RV64ZVE32F-NEXT: li a3, 32
1263812628
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1263912629
; RV64ZVE32F-NEXT: vsetivli zero, 26, e8, m2, tu, ma
1264012630
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 25
@@ -12660,7 +12650,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1266012650
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
1266112651
; RV64ZVE32F-NEXT: add a2, a0, a2
1266212652
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12663-
; RV64ZVE32F-NEXT: li a3, 32
1266412653
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1266512654
; RV64ZVE32F-NEXT: vsetivli zero, 30, e8, m2, tu, ma
1266612655
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 29
@@ -12673,7 +12662,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1267312662
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
1267412663
; RV64ZVE32F-NEXT: add a2, a0, a2
1267512664
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12676-
; RV64ZVE32F-NEXT: li a3, 32
1267712665
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1267812666
; RV64ZVE32F-NEXT: vsetivli zero, 31, e8, m2, tu, ma
1267912667
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 30
@@ -12698,7 +12686,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1269812686
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
1269912687
; RV64ZVE32F-NEXT: add a2, a0, a2
1270012688
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12701-
; RV64ZVE32F-NEXT: li a3, 32
1270212689
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
1270312690
; RV64ZVE32F-NEXT: vsetivli zero, 3, e8, m1, tu, ma
1270412691
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 2
@@ -12710,7 +12697,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1271012697
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
1271112698
; RV64ZVE32F-NEXT: add a2, a0, a2
1271212699
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12713-
; RV64ZVE32F-NEXT: li a3, 32
1271412700
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1271512701
; RV64ZVE32F-NEXT: vsetivli zero, 4, e8, m1, tu, ma
1271612702
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 3
@@ -12721,7 +12707,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1272112707
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
1272212708
; RV64ZVE32F-NEXT: add a2, a0, a2
1272312709
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12724-
; RV64ZVE32F-NEXT: li a3, 32
1272512710
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
1272612711
; RV64ZVE32F-NEXT: vsetivli zero, 7, e8, m1, tu, ma
1272712712
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 6
@@ -12733,7 +12718,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1273312718
; RV64ZVE32F-NEXT: vmv.x.s a2, v13
1273412719
; RV64ZVE32F-NEXT: add a2, a0, a2
1273512720
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12736-
; RV64ZVE32F-NEXT: li a3, 32
1273712721
; RV64ZVE32F-NEXT: vmv.s.x v13, a2
1273812722
; RV64ZVE32F-NEXT: vsetivli zero, 8, e8, m1, tu, ma
1273912723
; RV64ZVE32F-NEXT: vslideup.vi v10, v13, 7
@@ -12756,7 +12740,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1275612740
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
1275712741
; RV64ZVE32F-NEXT: add a2, a0, a2
1275812742
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12759-
; RV64ZVE32F-NEXT: li a3, 32
1276012743
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1276112744
; RV64ZVE32F-NEXT: vsetivli zero, 15, e8, m1, tu, ma
1276212745
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 14
@@ -12768,7 +12751,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1276812751
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
1276912752
; RV64ZVE32F-NEXT: add a2, a0, a2
1277012753
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12771-
; RV64ZVE32F-NEXT: li a3, 32
1277212754
; RV64ZVE32F-NEXT: vmv.s.x v9, a2
1277312755
; RV64ZVE32F-NEXT: vsetivli zero, 16, e8, m1, tu, ma
1277412756
; RV64ZVE32F-NEXT: vslideup.vi v10, v9, 15
@@ -12791,7 +12773,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1279112773
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
1279212774
; RV64ZVE32F-NEXT: add a2, a0, a2
1279312775
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12794-
; RV64ZVE32F-NEXT: li a3, 32
1279512776
; RV64ZVE32F-NEXT: vmv.s.x v14, a2
1279612777
; RV64ZVE32F-NEXT: vsetivli zero, 19, e8, m2, tu, ma
1279712778
; RV64ZVE32F-NEXT: vslideup.vi v10, v14, 18
@@ -12803,7 +12784,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1280312784
; RV64ZVE32F-NEXT: vmv.x.s a2, v12
1280412785
; RV64ZVE32F-NEXT: add a2, a0, a2
1280512786
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12806-
; RV64ZVE32F-NEXT: li a3, 32
1280712787
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1280812788
; RV64ZVE32F-NEXT: vsetivli zero, 20, e8, m2, tu, ma
1280912789
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 19
@@ -12814,7 +12794,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1281412794
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
1281512795
; RV64ZVE32F-NEXT: add a2, a0, a2
1281612796
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12817-
; RV64ZVE32F-NEXT: li a3, 32
1281812797
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1281912798
; RV64ZVE32F-NEXT: vsetivli zero, 23, e8, m2, tu, ma
1282012799
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 22
@@ -12826,7 +12805,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1282612805
; RV64ZVE32F-NEXT: vmv.x.s a2, v9
1282712806
; RV64ZVE32F-NEXT: add a2, a0, a2
1282812807
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12829-
; RV64ZVE32F-NEXT: li a3, 32
1283012808
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1283112809
; RV64ZVE32F-NEXT: vsetivli zero, 24, e8, m2, tu, ma
1283212810
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 23
@@ -12849,7 +12827,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1284912827
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
1285012828
; RV64ZVE32F-NEXT: add a2, a0, a2
1285112829
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12852-
; RV64ZVE32F-NEXT: li a3, 32
1285312830
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1285412831
; RV64ZVE32F-NEXT: vsetivli zero, 27, e8, m2, tu, ma
1285512832
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 26
@@ -12861,7 +12838,6 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
1286112838
; RV64ZVE32F-NEXT: vmv.x.s a2, v8
1286212839
; RV64ZVE32F-NEXT: add a2, a0, a2
1286312840
; RV64ZVE32F-NEXT: lbu a2, 0(a2)
12864-
; RV64ZVE32F-NEXT: li a3, 32
1286512841
; RV64ZVE32F-NEXT: vmv.s.x v12, a2
1286612842
; RV64ZVE32F-NEXT: vsetivli zero, 28, e8, m2, tu, ma
1286712843
; RV64ZVE32F-NEXT: vslideup.vi v10, v12, 27

0 commit comments

Comments (0)