diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
index a79180ed45b42..5dead46581a0a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert.ll
@@ -627,6 +627,41 @@ define void @add_v16i64(ptr %x, ptr %y) vscale_range(2,2) {
   ret void
 }
 
+define <vscale x 2 x float> @fp_reduction_vfmv_s_f(float %0, <vscale x 8 x float> %1, i64 %2) {
+; CHECK-LABEL: fp_reduction_vfmv_s_f:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vfmv.s.f v12, fa0
+; CHECK-NEXT:    vfredusum.vs v8, v8, v12
+; CHECK-NEXT:    ret
+  %4 = tail call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> poison, float %0, i64 %2)
+  %5 = tail call <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float> %4, i64 0)
+  %6 = tail call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> %1, <vscale x 2 x float> %5, i64 7, i64 %2)
+  ret <vscale x 2 x float> %6
+}
+
+define dso_local <vscale x 2 x i32> @int_reduction_vmv_s_x(i32 signext %0, <vscale x 8 x i32> %1, i64 %2) {
+; CHECK-LABEL: int_reduction_vmv_s_x:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.s.x v12, a0
+; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
+; CHECK-NEXT:    vredsum.vs v8, v8, v12
+; CHECK-NEXT:    ret
+  %4 = tail call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 %0, i64 %2)
+  %5 = tail call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %4, i64 0)
+  %6 = tail call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> %1, <vscale x 2 x i32> %5, i64 %2)
+  ret <vscale x 2 x i32> %6
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float>, float, i64)
+declare <vscale x 2 x float> @llvm.vector.extract.nxv2f32.nxv8f32(<vscale x 8 x float>, i64)
+declare <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float>, <vscale x 8 x float>, <vscale x 2 x float>, i64, i64)
+
+declare <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32>, i32, i64) #1
+declare <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32>, i64 immarg) #2
+declare <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32>, <vscale x 8 x i32>, <vscale x 2 x i32>, i64) #1
+
 declare <vscale x 1 x i64> @llvm.riscv.vadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,