Skip to content

Commit 27f0ffc

Browse files
committed
[RISCV] Handle more (add x, C) -> (sub x, -C) cases
This is a follow-up to llvm#137309, adding:
- multi-use of the constant with different adds
- vectors (vadd.vx -> vsub.vx)
1 parent c66ce08 commit 27f0ffc

File tree

6 files changed

+131
-2
lines changed

6 files changed

+131
-2
lines changed

llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp

Lines changed: 5 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3207,11 +3207,15 @@ bool RISCVDAGToDAGISel::selectSHXADD_UWOp(SDValue N, unsigned ShAmt,
32073207
}
32083208

32093209
bool RISCVDAGToDAGISel::selectNegImm(SDValue N, SDValue &Val) {
3210-
if (!isa<ConstantSDNode>(N) || !N.hasOneUse())
3210+
if (!isa<ConstantSDNode>(N))
32113211
return false;
32123212
int64_t Imm = cast<ConstantSDNode>(N)->getSExtValue();
32133213
if (isInt<32>(Imm))
32143214
return false;
3215+
3216+
if (any_of(N->users(), [](const SDNode *U) { return U->getOpcode() != ISD::ADD; }))
3217+
return false;
3218+
32153219
int OrigImmCost = RISCVMatInt::getIntMatCost(APInt(64, Imm), 64, *Subtarget,
32163220
/*CompressionCost=*/true);
32173221
int NegImmCost = RISCVMatInt::getIntMatCost(APInt(64, -Imm), 64, *Subtarget,

llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -6228,6 +6228,36 @@ foreach vti = AllIntegerVectors in {
62286228
}
62296229
}
62306230

6231+
// (add v, C) -> (sub v, -C) if -C cheaper to materialize
6232+
defvar I64IntegerVectors = !filter(vti, AllIntegerVectors, !eq(vti.SEW, 64));
6233+
foreach vti = I64IntegerVectors in {
6234+
let Predicates = [HasVInstructionsI64] in {
6235+
def : Pat<(vti.Vector (int_riscv_vadd (vti.Vector vti.RegClass:$passthru),
6236+
(vti.Vector vti.RegClass:$rs1),
6237+
(i64 negImm:$rs2),
6238+
VLOpFrag)),
6239+
(!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX)
6240+
vti.RegClass:$passthru,
6241+
vti.RegClass:$rs1,
6242+
negImm:$rs2,
6243+
GPR:$vl, vti.Log2SEW, TU_MU)>;
6244+
def : Pat<(vti.Vector (int_riscv_vadd_mask (vti.Vector vti.RegClass:$passthru),
6245+
(vti.Vector vti.RegClass:$rs1),
6246+
(i64 negImm:$rs2),
6247+
(vti.Mask VMV0:$vm),
6248+
VLOpFrag,
6249+
(i64 timm:$policy))),
6250+
(!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX#"_MASK")
6251+
vti.RegClass:$passthru,
6252+
vti.RegClass:$rs1,
6253+
negImm:$rs2,
6254+
(vti.Mask VMV0:$vm),
6255+
GPR:$vl,
6256+
vti.Log2SEW,
6257+
(i64 timm:$policy))>;
6258+
}
6259+
}
6260+
62316261
//===----------------------------------------------------------------------===//
62326262
// 11.2. Vector Widening Integer Add/Subtract
62336263
//===----------------------------------------------------------------------===//

llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -907,6 +907,19 @@ foreach vti = AllIntegerVectors in {
907907
}
908908
}
909909

910+
// (add v, C) -> (sub v, -C) if -C cheaper to materialize
911+
foreach vti = I64IntegerVectors in {
912+
let Predicates = [HasVInstructionsI64] in {
913+
def : Pat<(add (vti.Vector vti.RegClass:$rs1),
914+
(vti.Vector (SplatPat (i64 negImm:$rs2)))),
915+
// (riscv_vmv_v_x_vl undef, negImm:$rs2, srcvalue)),
916+
(!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX)
917+
(vti.Vector (IMPLICIT_DEF)),
918+
vti.RegClass:$rs1,
919+
negImm:$rs2, vti.AVL, vti.Log2SEW, TA_MA)>;
920+
}
921+
}
922+
910923
// 11.2. Vector Widening Integer Add and Subtract
911924
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, sext_oneuse, "PseudoVWADD">;
912925
defm : VPatWidenBinarySDNode_VV_VX_WV_WX<add, zext_oneuse, "PseudoVWADDU">;

llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1957,6 +1957,19 @@ foreach vti = AllIntegerVectors in {
19571957
}
19581958
}
19591959

1960+
// (add v, C) -> (sub v, -C) if -C cheaper to materialize
1961+
foreach vti = I64IntegerVectors in {
1962+
let Predicates = [HasVInstructionsI64] in {
1963+
def : Pat<(riscv_add_vl (vti.Vector vti.RegClass:$rs1),
1964+
(vti.Vector (SplatPat (i64 negImm:$rs2))),
1965+
vti.RegClass:$passthru, (vti.Mask VMV0:$vm), VLOpFrag),
1966+
(!cast<Instruction>("PseudoVSUB_VX_"#vti.LMul.MX#"_MASK")
1967+
vti.RegClass:$passthru, vti.RegClass:$rs1,
1968+
negImm:$rs2, (vti.Mask VMV0:$vm),
1969+
GPR:$vl, vti.Log2SEW, TAIL_AGNOSTIC)>;
1970+
}
1971+
}
1972+
19601973
// 11.2. Vector Widening Integer Add/Subtract
19611974
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwadd_vl, riscv_vwadd_w_vl, "PseudoVWADD">;
19621975
defm : VPatBinaryWVL_VV_VX_WV_WX<riscv_vwaddu_vl, riscv_vwaddu_w_vl, "PseudoVWADDU">;

llvm/test/CodeGen/RISCV/add-imm64-to-sub.ll

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,21 @@ define i64 @add_multiuse(i64 %x) {
5656
; CHECK-NEXT: and a0, a0, a1
5757
; CHECK-NEXT: ret
5858
%add = add i64 %x, -1099511627775
59-
%xor = and i64 %add, -1099511627775
59+
%and = and i64 %add, -1099511627775
60+
ret i64 %and
61+
}
62+
63+
define i64 @add_multiuse_const(i64 %x, i64 %y) {
64+
; CHECK-LABEL: add_multiuse_const:
65+
; CHECK: # %bb.0:
66+
; CHECK-NEXT: li a2, -1
67+
; CHECK-NEXT: srli a2, a2, 24
68+
; CHECK-NEXT: sub a0, a0, a2
69+
; CHECK-NEXT: sub a1, a1, a2
70+
; CHECK-NEXT: xor a0, a0, a1
71+
; CHECK-NEXT: ret
72+
%a = add i64 %x, -1099511627775
73+
%b = add i64 %y, -1099511627775
74+
%xor = xor i64 %a, %b
6075
ret i64 %xor
6176
}

llvm/test/CodeGen/RISCV/rvv/vadd-sdnode.ll

Lines changed: 54 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -865,3 +865,57 @@ define <vscale x 8 x i32> @vadd_vv_mask_negative1_nxv8i32(<vscale x 8 x i32> %va
865865
%vd = add <vscale x 8 x i32> %vc, %vs
866866
ret <vscale x 8 x i32> %vd
867867
}
868+
869+
define <vscale x 1 x i64> @vadd_vx_imm64_to_sub(<vscale x 1 x i64> %va) nounwind {
870+
; RV32-LABEL: vadd_vx_imm64_to_sub:
871+
; RV32: # %bb.0:
872+
; RV32-NEXT: addi sp, sp, -16
873+
; RV32-NEXT: li a0, -256
874+
; RV32-NEXT: li a1, 1
875+
; RV32-NEXT: sw a1, 8(sp)
876+
; RV32-NEXT: sw a0, 12(sp)
877+
; RV32-NEXT: addi a0, sp, 8
878+
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
879+
; RV32-NEXT: vlse64.v v9, (a0), zero
880+
; RV32-NEXT: vadd.vv v8, v8, v9
881+
; RV32-NEXT: addi sp, sp, 16
882+
; RV32-NEXT: ret
883+
;
884+
; RV64-LABEL: vadd_vx_imm64_to_sub:
885+
; RV64: # %bb.0:
886+
; RV64-NEXT: li a0, -1
887+
; RV64-NEXT: slli a0, a0, 40
888+
; RV64-NEXT: addi a0, a0, 1
889+
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
890+
; RV64-NEXT: vadd.vx v8, v8, a0
891+
; RV64-NEXT: ret
892+
%vc = add <vscale x 1 x i64> splat (i64 -1099511627775), %va
893+
ret <vscale x 1 x i64> %vc
894+
}
895+
896+
define <vscale x 1 x i64> @vadd_vx_imm64_to_sub_swapped(<vscale x 1 x i64> %va) nounwind {
897+
; RV32-LABEL: vadd_vx_imm64_to_sub_swapped:
898+
; RV32: # %bb.0:
899+
; RV32-NEXT: addi sp, sp, -16
900+
; RV32-NEXT: li a0, -256
901+
; RV32-NEXT: li a1, 1
902+
; RV32-NEXT: sw a1, 8(sp)
903+
; RV32-NEXT: sw a0, 12(sp)
904+
; RV32-NEXT: addi a0, sp, 8
905+
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
906+
; RV32-NEXT: vlse64.v v9, (a0), zero
907+
; RV32-NEXT: vadd.vv v8, v8, v9
908+
; RV32-NEXT: addi sp, sp, 16
909+
; RV32-NEXT: ret
910+
;
911+
; RV64-LABEL: vadd_vx_imm64_to_sub_swapped:
912+
; RV64: # %bb.0:
913+
; RV64-NEXT: li a0, -1
914+
; RV64-NEXT: slli a0, a0, 40
915+
; RV64-NEXT: addi a0, a0, 1
916+
; RV64-NEXT: vsetvli a1, zero, e64, m1, ta, ma
917+
; RV64-NEXT: vadd.vx v8, v8, a0
918+
; RV64-NEXT: ret
919+
%vc = add <vscale x 1 x i64> %va, splat (i64 -1099511627775)
920+
ret <vscale x 1 x i64> %vc
921+
}

0 commit comments

Comments
 (0)