
[RISCV] Narrow indices of fixed vector gather/scatter nodes #66405


Closed · wants to merge 1 commit
35 changes: 29 additions & 6 deletions llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -11628,15 +11628,39 @@ static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG,
return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false, Subtarget);
}

// According to the property that indexed load/store instructions
// zero-extended their indices, \p narrowIndex tries to narrow the type of index
// operand if it is matched to pattern (shl (zext x to ty), C) and bits(x) + C <
// bits(ty).
/// According to the property that indexed load/store instructions zero-extend
/// their indices, try to narrow the type of index operand.
static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &DAG) {
if (isIndexTypeSigned(IndexType))
return false;

if (N.getOpcode() != ISD::SHL || !N->hasOneUse())
if (!N->hasOneUse())
return false;

EVT VT = N.getValueType();
SDLoc DL(N);

// In general, what we're doing here is seeing if we can sink a truncate to
// a smaller element type into the expression tree building our index.
// TODO: We can generalize this and handle a bunch more cases if useful.

// Narrow a buildvector to the narrowest element type. This requires less
// work and less register pressure at high LMUL, and creates smaller constants
// which may be cheaper to materialize.
if (ISD::isBuildVectorOfConstantSDNodes(N.getNode())) {
KnownBits Known = DAG.computeKnownBits(N);
unsigned ActiveBits = std::max(8u, Known.countMaxActiveBits());
LLVMContext &C = *DAG.getContext();
EVT ResultVT = EVT::getIntegerVT(C, ActiveBits).getRoundIntegerType(C);
if (ResultVT.bitsLT(VT.getVectorElementType())) {
N = DAG.getNode(ISD::TRUNCATE, DL,
VT.changeVectorElementType(ResultVT), N);
return true;
}
}

// Handle the pattern (shl (zext x to ty), C) and bits(x) + C < bits(ty).
if (N.getOpcode() != ISD::SHL)
return false;

SDValue N0 = N.getOperand(0);
@@ -11651,7 +11675,6 @@ static bool narrowIndex(SDValue &N, ISD::MemIndexType IndexType, SelectionDAG &D
if (!ISD::isConstantSplatVector(N1.getNode(), ShAmt))
return false;

SDLoc DL(N);
SDValue Src = N0.getOperand(0);
EVT SrcVT = Src.getValueType();
unsigned SrcElen = SrcVT.getScalarSizeInBits();
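As a quick illustration of the two index shapes narrowIndex now handles, here is a standalone LLVM IR sketch in the spirit of the tests below. The function names, offsets, and element types are made up for this sketch and are not taken from the patch or its test file; the expected RV64 lowering (vluxei8 rather than vluxei64) is inferred from the check-line updates in fixed-vectors-masked-gather.ll, not verified for these exact functions.

; Illustrative only -- not part of the patch's test suite.

; Case 1: constant build_vector indices. All byte offsets fit in 8 bits, so
; the <4 x i64> index vector can be truncated to <4 x i8> before the indexed
; load is formed.
define <4 x i32> @gather_small_const_offsets(ptr %base) {
  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i64> <i64 0, i64 8, i64 20, i64 36>
  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4,
              <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
  ret <4 x i32> %v
}

; Case 2: (shl (zext x to i64), 2) indices. bits(x) + C = 8 + 2 = 10 < 64,
; so the pre-existing shl-of-zext path can keep the index in a narrower type.
define <4 x i32> @gather_shl_zext_index(ptr %base, <4 x i8> %idx) {
  %idx.ext = zext <4 x i8> %idx to <4 x i64>
  %offsets = shl <4 x i64> %idx.ext, <i64 2, i64 2, i64 2, i64 2>
  %ptrs = getelementptr inbounds i8, ptr %base, <4 x i64> %offsets
  %v = call <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr> %ptrs, i32 4,
              <4 x i1> <i1 true, i1 true, i1 true, i1 true>, <4 x i32> poison)
  ret <4 x i32> %v
}

declare <4 x i32> @llvm.masked.gather.v4i32.v4p0(<4 x ptr>, i32, <4 x i1>, <4 x i32>)

Case 1 exercises the new constant build_vector narrowing (truncate to the narrowest type covering countMaxActiveBits, rounded up and clamped to at least i8); case 2 exercises the retained (shl (zext x to ty), C) path, which fires because bits(x) + C < bits(ty).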
68 changes: 34 additions & 34 deletions llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -12857,10 +12857,10 @@ define <4 x i32> @mgather_broadcast_load_unmasked(ptr %base) {
;
; RV64V-LABEL: mgather_broadcast_load_unmasked:
; RV64V: # %bb.0:
; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64V-NEXT: vmv.v.i v10, 0
; RV64V-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64V-NEXT: vmv.v.i v9, 0
; RV64V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64V-NEXT: vluxei64.v v8, (a0), v10
; RV64V-NEXT: vluxei8.v v8, (a0), v9
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_broadcast_load_unmasked:
@@ -12949,10 +12949,10 @@ define <4 x i32> @mgather_broadcast_load_masked(ptr %base, <4 x i1> %m) {
;
; RV64V-LABEL: mgather_broadcast_load_masked:
; RV64V: # %bb.0:
; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64V-NEXT: vmv.v.i v10, 0
; RV64V-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64V-NEXT: vmv.v.i v9, 0
; RV64V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64V-NEXT: vluxei64.v v8, (a0), v10, v0.t
; RV64V-NEXT: vluxei8.v v8, (a0), v9, v0.t
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_broadcast_load_masked:
@@ -13016,11 +13016,11 @@ define <4 x i32> @mgather_unit_stride_load(ptr %base) {
;
; RV64V-LABEL: mgather_unit_stride_load:
; RV64V: # %bb.0:
; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64V-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64V-NEXT: vid.v v8
; RV64V-NEXT: vsll.vi v10, v8, 2
; RV64V-NEXT: vsll.vi v9, v8, 2
; RV64V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64V-NEXT: vluxei64.v v8, (a0), v10
; RV64V-NEXT: vluxei8.v v8, (a0), v9
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_unit_stride_load:
@@ -13089,11 +13089,9 @@ define <4 x i32> @mgather_unit_stride_load_with_offset(ptr %base) {
; RV64V: # %bb.0:
; RV64V-NEXT: lui a1, 115073
; RV64V-NEXT: addiw a1, a1, 1040
; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64V-NEXT: vmv.s.x v8, a1
; RV64V-NEXT: vsext.vf8 v10, v8
; RV64V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64V-NEXT: vluxei64.v v8, (a0), v10
; RV64V-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV64V-NEXT: vmv.s.x v9, a1
; RV64V-NEXT: vluxei8.v v8, (a0), v9
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_unit_stride_load_with_offset:
@@ -13153,19 +13151,20 @@ define <4 x i32> @mgather_unit_stride_load_with_offset(ptr %base) {
define <4 x i32> @mgather_unit_stride_load_narrow_idx(ptr %base) {
; RV32-LABEL: mgather_unit_stride_load_narrow_idx:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT: vid.v v8
; RV32-NEXT: vsll.vi v8, v8, 2
; RV32-NEXT: vluxei32.v v8, (a0), v8
; RV32-NEXT: vsll.vi v9, v8, 2
; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vluxei8.v v8, (a0), v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_unit_stride_load_narrow_idx:
; RV64V: # %bb.0:
; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64V-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64V-NEXT: vid.v v8
; RV64V-NEXT: vsll.vi v10, v8, 2
; RV64V-NEXT: vsll.vi v9, v8, 2
; RV64V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64V-NEXT: vluxei64.v v8, (a0), v10
; RV64V-NEXT: vluxei8.v v8, (a0), v9
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_unit_stride_load_narrow_idx:
@@ -13224,19 +13223,20 @@ define <4 x i32> @mgather_unit_stride_load_narrow_idx(ptr %base) {
define <4 x i32> @mgather_unit_stride_load_wide_idx(ptr %base) {
; RV32-LABEL: mgather_unit_stride_load_wide_idx:
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; RV32-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV32-NEXT: vid.v v8
; RV32-NEXT: vsll.vi v8, v8, 2
; RV32-NEXT: vluxei32.v v8, (a0), v8
; RV32-NEXT: vsll.vi v9, v8, 2
; RV32-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV32-NEXT: vluxei8.v v8, (a0), v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_unit_stride_load_wide_idx:
; RV64V: # %bb.0:
; RV64V-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64V-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RV64V-NEXT: vid.v v8
; RV64V-NEXT: vsll.vi v10, v8, 2
; RV64V-NEXT: vsll.vi v9, v8, 2
; RV64V-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; RV64V-NEXT: vluxei64.v v8, (a0), v10
; RV64V-NEXT: vluxei8.v v8, (a0), v9
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_unit_stride_load_wide_idx:
@@ -13374,17 +13374,17 @@ define <8 x i16> @mgather_strided_2xSEW(ptr %base) {
; RV32-NEXT: lui a1, %hi(.LCPI107_0)
; RV32-NEXT: addi a1, a1, %lo(.LCPI107_0)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vle32.v v10, (a1)
; RV32-NEXT: vluxei32.v v8, (a0), v10
; RV32-NEXT: vle8.v v9, (a1)
; RV32-NEXT: vluxei8.v v8, (a0), v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_strided_2xSEW:
; RV64V: # %bb.0:
; RV64V-NEXT: lui a1, %hi(.LCPI107_0)
; RV64V-NEXT: addi a1, a1, %lo(.LCPI107_0)
; RV64V-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64V-NEXT: vle64.v v12, (a1)
; RV64V-NEXT: vluxei64.v v8, (a0), v12
; RV64V-NEXT: vle8.v v9, (a1)
; RV64V-NEXT: vluxei8.v v8, (a0), v9
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_strided_2xSEW:
@@ -13491,17 +13491,17 @@ define <8 x i16> @mgather_gather_2xSEW(ptr %base) {
; RV32-NEXT: lui a1, %hi(.LCPI108_0)
; RV32-NEXT: addi a1, a1, %lo(.LCPI108_0)
; RV32-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV32-NEXT: vle32.v v10, (a1)
; RV32-NEXT: vluxei32.v v8, (a0), v10
; RV32-NEXT: vle8.v v9, (a1)
; RV32-NEXT: vluxei8.v v8, (a0), v9
; RV32-NEXT: ret
;
; RV64V-LABEL: mgather_gather_2xSEW:
; RV64V: # %bb.0:
; RV64V-NEXT: lui a1, %hi(.LCPI108_0)
; RV64V-NEXT: addi a1, a1, %lo(.LCPI108_0)
; RV64V-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; RV64V-NEXT: vle64.v v12, (a1)
; RV64V-NEXT: vluxei64.v v8, (a0), v12
; RV64V-NEXT: vle8.v v9, (a1)
; RV64V-NEXT: vluxei8.v v8, (a0), v9
; RV64V-NEXT: ret
;
; RV64ZVE32F-LABEL: mgather_gather_2xSEW: