diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index 96edd331eb678..68b614d1d3fdc 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -8857,14 +8857,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
       I32VL = DAG.getConstant(2 * AVLInt, DL, XLenVT);
     } else if (AVLInt >= 2 * MaxVLMAX) {
       // Just set vl to VLMAX in this situation
-      RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(I32VT);
-      SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
-      unsigned Sew = RISCVVType::encodeSEW(I32VT.getScalarSizeInBits());
-      SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
-      SDValue SETVLMAX = DAG.getTargetConstant(
-          Intrinsic::riscv_vsetvlimax, DL, MVT::i32);
-      I32VL = DAG.getNode(ISD::INTRINSIC_WO_CHAIN, DL, XLenVT, SETVLMAX, SEW,
-                          LMUL);
+      I32VL = DAG.getRegister(RISCV::X0, XLenVT);
     } else {
       // For AVL between (MinVLMAX, 2 * MaxVLMAX), the actual working vl
       // is related to the hardware implementation.
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll
index 4115e6a91f965..fd90e67b1fb2c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1down-constant-vl-rv32.ll
@@ -51,7 +51,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl3(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl8(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl9(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl15(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl16(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047(
 ;
 ; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-512:       # %bb.0: # %entry
-; CHECK-512-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-512-NEXT:    vsetivli zero, 16, e32, m1, ta, ma
 ; CHECK-512-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-512-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-512-NEXT:    ret
 ;
 ; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2047:
 ; CHECK-64:       # %bb.0: # %entry
-; CHECK-64-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a0
 ; CHECK-64-NEXT:    vslide1down.vx v8, v8, a1
 ; CHECK-64-NEXT:    ret
@@ -269,12 +269,26 @@ entry:
 }
 
 define <vscale x 1 x i64> @intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vslide1down.vx v8, v8, a0
-; CHECK-NEXT:    vslide1down.vx v8, v8, a1
-; CHECK-NEXT:    ret
+; CHECK-128-65536-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
+; CHECK-128-65536:       # %bb.0: # %entry
+; CHECK-128-65536-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-128-65536-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-128-65536-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-128-65536-NEXT:    ret
+;
+; CHECK-512-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
+; CHECK-512:       # %bb.0: # %entry
+; CHECK-512-NEXT:    vsetivli zero, 16, e32, m1, ta, ma
+; CHECK-512-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-512-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-512-NEXT:    ret
+;
+; CHECK-64-LABEL: intrinsic_vslide1down_vx_nxv1i64_nxv1i64_i64_vl2048:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
+; CHECK-64-NEXT:    vslide1down.vx v8, v8, a0
+; CHECK-64-NEXT:    vslide1down.vx v8, v8, a1
+; CHECK-64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1down.nxv1i64.i64(
     <vscale x 1 x i64> undef,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll
index f0d621bef2b91..b26f1cab97c77 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslide1up-constant-vl-rv32.ll
@@ -51,7 +51,7 @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl3(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl8(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl9(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl15(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl16(
@@ … @@ define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2047(
@@ … @@
 define <vscale x 1 x i64> @intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048(<vscale x 1 x i64> %0, i64 %1) nounwind {
-; CHECK-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
-; CHECK-NEXT:    vslide1up.vx v9, v8, a1
-; CHECK-NEXT:    vslide1up.vx v8, v9, a0
-; CHECK-NEXT:    ret
+; CHECK-128-65536-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048:
+; CHECK-128-65536:       # %bb.0: # %entry
+; CHECK-128-65536-NEXT:    vsetvli a2, zero, e32, m1, ta, ma
+; CHECK-128-65536-NEXT:    vslide1up.vx v9, v8, a1
+; CHECK-128-65536-NEXT:    vslide1up.vx v8, v9, a0
+; CHECK-128-65536-NEXT:    ret
+;
+; CHECK-512-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048:
+; CHECK-512:       # %bb.0: # %entry
+; CHECK-512-NEXT:    vsetivli zero, 16, e32, m1, ta, ma
+; CHECK-512-NEXT:    vslide1up.vx v9, v8, a1
+; CHECK-512-NEXT:    vslide1up.vx v8, v9, a0
+; CHECK-512-NEXT:    ret
+;
+; CHECK-64-LABEL: intrinsic_vslide1up_vx_nxv1i64_nxv1i64_i64_vl2048:
+; CHECK-64:       # %bb.0: # %entry
+; CHECK-64-NEXT:    vsetivli zero, 2, e32, m1, ta, ma
+; CHECK-64-NEXT:    vslide1up.vx v9, v8, a1
+; CHECK-64-NEXT:    vslide1up.vx v8, v9, a0
+; CHECK-64-NEXT:    ret
 entry:
   %a = call <vscale x 1 x i64> @llvm.riscv.vslide1up.nxv1i64.i64(
     <vscale x 1 x i64> undef,
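; ---------------------------------------------------------------------------
; Illustrative sketch (not part of the patch): why the CHECK lines change.
; Passing X0 as the VL operand asks the backend for VLMAX, and the scalar
; form `vsetvli rd, x0, vtypei` with rd != x0 sets vl to VLMAX for the
; requested SEW/LMUL.  In the run configurations where the exact VLEN is
; known (the CHECK-512 and CHECK-64 prefixes), VLMAX for e32/m1 is a
; compile-time constant (512/32 = 16 and 64/32 = 2), so the immediate form
; can be emitted and no scratch GPR is needed; with only a VLEN range known
; (CHECK-128-65536), the VLMAX-requesting form is kept:
;
;   vsetvli  a2, zero, e32, m1, ta, ma   ; vl = VLMAX, VLEN only bounded
;   vsetivli zero, 16, e32, m1, ta, ma   ; vl = 16 = VLMAX when VLEN = 512
;   vsetivli zero, 2, e32, m1, ta, ma    ; vl = 2 = VLMAX when VLEN = 64
; ---------------------------------------------------------------------------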