diff --git a/llvm/include/llvm/CodeGen/BasicTTIImpl.h b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
index 3dd16dafe3c42..a90ea0ba32ffb 100644
--- a/llvm/include/llvm/CodeGen/BasicTTIImpl.h
+++ b/llvm/include/llvm/CodeGen/BasicTTIImpl.h
@@ -1691,7 +1691,69 @@ class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
       }
     }
 
-    // Assume that we need to scalarize this intrinsic.
+    // VP Intrinsics should have the same cost as their non-vp counterpart.
+    // TODO: Adjust the cost to make the vp intrinsic cheaper than its non-vp
+    // counterpart when the vector length argument is smaller than the maximum
+    // vector length.
+    // TODO: Support other kinds of VPIntrinsics
+    if (VPIntrinsic::isVPIntrinsic(ICA.getID())) {
+      std::optional<unsigned> FOp =
+          VPIntrinsic::getFunctionalOpcodeForVP(ICA.getID());
+      if (FOp) {
+        if (ICA.getID() == Intrinsic::vp_load) {
+          Align Alignment;
+          if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
+            Alignment = VPI->getPointerAlignment().valueOrOne();
+          unsigned AS = 0;
+          if (ICA.getArgs().size() > 1)
+            if (auto *PtrTy =
+                    dyn_cast<PointerType>(ICA.getArgs()[0]->getType()))
+              AS = PtrTy->getAddressSpace();
+          return thisT()->getMemoryOpCost(*FOp, ICA.getReturnType(), Alignment,
+                                          AS, CostKind);
+        }
+        if (ICA.getID() == Intrinsic::vp_store) {
+          Align Alignment;
+          if (auto *VPI = dyn_cast_or_null<VPIntrinsic>(ICA.getInst()))
+            Alignment = VPI->getPointerAlignment().valueOrOne();
+          unsigned AS = 0;
+          if (ICA.getArgs().size() >= 2)
+            if (auto *PtrTy =
+                    dyn_cast<PointerType>(ICA.getArgs()[1]->getType()))
+              AS = PtrTy->getAddressSpace();
+          return thisT()->getMemoryOpCost(*FOp, Args[0]->getType(), Alignment,
+                                          AS, CostKind);
+        }
+        if (VPBinOpIntrinsic::isVPBinOp(ICA.getID())) {
+          return thisT()->getArithmeticInstrCost(*FOp, ICA.getReturnType(),
+                                                 CostKind);
+        }
+      }
+
+      std::optional<Intrinsic::ID> FID =
+          VPIntrinsic::getFunctionalIntrinsicIDForVP(ICA.getID());
+      if (FID) {
+        // Non-vp version will have same Args/Tys except mask and vector length.
+        assert(ICA.getArgs().size() >= 2 && ICA.getArgTypes().size() >= 2 &&
+               "Expected VPIntrinsic to have Mask and Vector Length args and "
+               "types");
+        ArrayRef<Type *> NewTys = ArrayRef(ICA.getArgTypes()).drop_back(2);
+
+        // VPReduction intrinsics have a start value argument that their non-vp
+        // counterparts do not have, except for the fadd and fmul non-vp
+        // counterpart.
+        if (VPReductionIntrinsic::isVPReduction(ICA.getID()) &&
+            *FID != Intrinsic::vector_reduce_fadd &&
+            *FID != Intrinsic::vector_reduce_fmul)
+          NewTys = NewTys.drop_front();
+
+        IntrinsicCostAttributes NewICA(*FID, ICA.getReturnType(), NewTys,
+                                       ICA.getFlags());
+        return thisT()->getIntrinsicInstrCost(NewICA, CostKind);
+      }
+    }
+
+    // Assume that we need to scalarize this intrinsic.
     // Compute the scalarization overhead based on Args for a vector
     // intrinsic.
     InstructionCost ScalarizationCost = InstructionCost::getInvalid();
diff --git a/llvm/test/Analysis/CostModel/RISCV/gep.ll b/llvm/test/Analysis/CostModel/RISCV/gep.ll
index be518faf7e051..4fadf34c1973f 100644
--- a/llvm/test/Analysis/CostModel/RISCV/gep.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/gep.ll
@@ -270,7 +270,7 @@ define void @non_foldable_vector_uses(ptr %base, <2 x ptr> %base.vec) {
 ; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %4 = getelementptr i8, ptr %base, i32 42
 ; RVI-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %x4 = call <2 x i8> @llvm.masked.expandload.v2i8(ptr %4, <2 x i1> undef, <2 x i8> undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %5 = getelementptr i8, ptr %base, i32 42
-; RVI-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %x5 = call <2 x i8> @llvm.vp.load.v2i8.p0(ptr %5, <2 x i1> undef, i32 undef)
+; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %x5 = call <2 x i8> @llvm.vp.load.v2i8.p0(ptr %5, <2 x i1> undef, i32 undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %6 = getelementptr i8, ptr %base, i32 42
 ; RVI-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %x6 = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr %6, i64 undef, <2 x i1> undef, i32 undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %7 = getelementptr i8, ptr %base, i32 42
@@ -282,7 +282,7 @@ define void @non_foldable_vector_uses(ptr %base, <2 x ptr> %base.vec) {
 ; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %10 = getelementptr i8, ptr %base, i32 42
 ; RVI-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.masked.compressstore.v2i8(<2 x i8> undef, ptr %10, <2 x i1> undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %11 = getelementptr i8, ptr %base, i32 42
-; RVI-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.vp.store.v2i8.p0(<2 x i8> undef, ptr %11, <2 x i1> undef, i32 undef)
+; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.vp.store.v2i8.p0(<2 x i8> undef, ptr %11, <2 x i1> undef, i32 undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %12 = getelementptr i8, ptr %base, i32 42
 ; RVI-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vp.strided.store.v2i8.p0.i64(<2 x i8> undef, ptr %12, i64 undef, <2 x i1> undef, i32 undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
@@ -340,7 +340,7 @@ define void @foldable_vector_uses(ptr %base, <2 x ptr> %base.vec) {
 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %4 = getelementptr i8, ptr %base, i32 0
 ; RVI-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %x4 = call <2 x i8> @llvm.masked.expandload.v2i8(ptr %4, <2 x i1> undef, <2 x i8> undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %5 = getelementptr i8, ptr %base, i32 0
-; RVI-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %x5 = call <2 x i8> @llvm.vp.load.v2i8.p0(ptr %5, <2 x i1> undef, i32 undef)
+; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %x5 = call <2 x i8> @llvm.vp.load.v2i8.p0(ptr %5, <2 x i1> undef, i32 undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %6 = getelementptr i8, ptr %base, i32 0
 ; RVI-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %x6 = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0.i64(ptr %6, i64 undef, <2 x i1> undef, i32 undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %7 = getelementptr i8, ptr %base, i32 0
@@ -352,7 +352,7 @@ define void @foldable_vector_uses(ptr %base, <2 x ptr> %base.vec) {
 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %10 = getelementptr i8, ptr %base, i32 0
 ; RVI-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.masked.compressstore.v2i8(<2 x i8> undef, ptr %10, <2 x i1> undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %11 = getelementptr i8, ptr %base, i32 0
-; RVI-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.vp.store.v2i8.p0(<2 x i8> undef, ptr %11, <2 x i1> undef, i32 undef)
+; RVI-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.vp.store.v2i8.p0(<2 x i8> undef, ptr %11, <2 x i1> undef, i32 undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: %12 = getelementptr i8, ptr %base, i32 0
 ; RVI-NEXT: Cost Model: Found an estimated cost of 12 for instruction: call void @llvm.experimental.vp.strided.store.v2i8.p0.i64(<2 x i8> undef, ptr %12, i64 undef, <2 x i1> undef, i32 undef)
 ; RVI-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
diff --git a/llvm/test/Analysis/CostModel/RISCV/rvv-intrinsics.ll b/llvm/test/Analysis/CostModel/RISCV/rvv-intrinsics.ll
index 93de623cf1c6d..43a03404e8db6 100644
--- a/llvm/test/Analysis/CostModel/RISCV/rvv-intrinsics.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/rvv-intrinsics.ll
@@ -206,10 +206,588 @@ define void @vp_fshl() {
   ret void
 }
 
+define void @add() {
+; CHECK-LABEL: 'add'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t0 = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> undef, <2 x i8> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t1 = add <2 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t2 = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> undef, <4 x i8> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t3 = add <4 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t4 = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> undef, <8 x i8> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t5 = add <8 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t6 = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> undef, <16 x i8> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t7 = add <16 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t8 = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> undef, <2 x i64> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t9 = add <2 x i64> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t10 = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> undef, <4 x i64> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t12 = add <4 x i64> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %t13 = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> undef, <8 x i64> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %t14 = add <8 x i64> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %t15 = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> undef, <16 x i64> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %t16 = add <16 x i64> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t17 = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t18 = add <vscale x 2 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t19 = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t20 = add <vscale x 4 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t21 = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t22 = add <vscale x 8 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t23 = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t24 = add <vscale x 16 x i8> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t25 = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t26 = add <vscale x 2 x i64> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %t27 = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %t28 = add <vscale x 4 x i64> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %t29 = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %t30 = add <vscale x 8 x i64> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %t31 = call <vscale x 16 x i64> @llvm.vp.add.nxv16i64(<vscale x 16 x i64> undef, <vscale x 16 x i64> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %t32 = add <vscale x 16 x i64> undef, undef
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+  %t0 = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> undef, <2 x i8> undef, <2 x i1> undef, i32 undef)
+  %t1 = add <2 x i8> undef, undef
+  %t2 = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> undef, <4 x i8> undef, <4 x i1> undef, i32 undef)
+  %t3 = add <4 x i8> undef, undef
+  %t4 = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> undef, <8 x i8> undef, <8 x i1> undef, i32 undef)
+  %t5 = add <8 x i8> undef, undef
+  %t6 = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> undef, <16 x i8> undef, <16 x i1> undef, i32 undef)
+  %t7 = add <16 x i8> undef, undef
+  %t8 = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> undef, <2 x i64> undef, <2 x i1> undef, i32 undef)
+  %t9 = add <2 x i64> undef, undef
+  %t10 = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> undef, <4 x i64> undef, <4 x i1> undef, i32 undef)
+  %t12 = add <4 x i64> undef, undef
+  %t13 = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> undef, <8 x i64> undef, <8 x i1> undef, i32 undef)
+  %t14 = add <8 x i64> undef, undef
+  %t15 = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> undef, <16 x i64> undef, <16 x i1> undef, i32 undef)
+  %t16 = add <16 x i64> undef, undef
+  %t17 = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i1> undef, i32 undef)
+  %t18 = add <vscale x 2 x i8> undef, undef
+  %t19 = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i1> undef, i32 undef)
+  %t20 = add <vscale x 4 x i8> undef, undef
+  %t21 = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i1> undef, i32 undef)
+  %t22 = add <vscale x 8 x i8> undef, undef
+  %t23 = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i1> undef, i32 undef)
+  %t24 = add <vscale x 16 x i8> undef, undef
+  %t25 = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i1> undef, i32 undef)
+  %t26 = add <vscale x 2 x i64> undef, undef
+  %t27 = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, <vscale x 4 x i1> undef, i32 undef)
+  %t28 = add <vscale x 4 x i64> undef, undef
+  %t29 = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> undef, <vscale x 8 x i1> undef, i32 undef)
+  %t30 = add <vscale x 8 x i64> undef, undef
+  %t31 = call <vscale x 16 x i64> @llvm.vp.add.nxv16i64(<vscale x 16 x i64> undef, <vscale x 16 x i64> undef, <vscale x 16 x i1> undef, i32 undef)
+  %t32 = add <vscale x 16 x i64> undef, undef
+  ret void
+}
+
+define void @abs() {
+; CHECK-LABEL: 'abs'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %1 = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> undef, i1 false, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %2 = call <2 x i8> @llvm.abs.v2i8(<2 x i8> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %3 = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> undef, i1 false, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %4 = call <4 x i8> @llvm.abs.v4i8(<4 x i8> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %5 = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> undef, i1 false, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %6 = call <8 x i8> @llvm.abs.v8i8(<8 x i8> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %7 = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> undef, i1 false, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %8 = call <16 x i8> @llvm.abs.v16i8(<16 x i8> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %9 = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> undef, i1 false, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %10 = call <2 x i64> @llvm.abs.v2i64(<2 x i64> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %11 = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> undef, i1 false, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %12 = call <4 x i64> @llvm.abs.v4i64(<4 x i64> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %13 = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> undef, i1 false, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %14 = call <8 x i64> @llvm.abs.v8i64(<8 x i64> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %15 = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> undef, i1 false, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %16 = call <16 x i64> @llvm.abs.v16i64(<16 x i64> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %17 = call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> undef, i1 false, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %18 = call <vscale x 2 x i8> @llvm.abs.nxv2i8(<vscale x 2 x i8> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %19 = call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> undef, i1 false, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %20 = call <vscale x 4 x i8> @llvm.abs.nxv4i8(<vscale x 4 x i8> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %21 = call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> undef, i1 false, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %22 = call <vscale x 8 x i8> @llvm.abs.nxv8i8(<vscale x 8 x i8> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %23 = call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> undef, i1 false, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %24 = call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %25 = call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> undef, i1 false, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %26 = call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %27 = call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> undef, i1 false, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %28 = call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %29 = call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> undef, i1 false, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %30 = call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %31 = call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> undef, i1 false, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %32 = call <vscale x 16 x i64> @llvm.abs.nxv16i64(<vscale x 16 x i64> undef, i1 false)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+  call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> undef, i1 0, <2 x i1> undef, i32 undef)
+  call <2 x i8> @llvm.abs.v2i8(<2 x i8> undef, i1 0)
+  call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> undef, i1 0, <4 x i1> undef, i32 undef)
+  call <4 x i8> @llvm.abs.v4i8(<4 x i8> undef, i1 0)
+  call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> undef, i1 0, <8 x i1> undef, i32 undef)
+  call <8 x i8> @llvm.abs.v8i8(<8 x i8> undef, i1 0)
+  call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> undef, i1 0, <16 x i1> undef, i32 undef)
+  call <16 x i8> @llvm.abs.v16i8(<16 x i8> undef, i1 0)
+  call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> undef, i1 0, <2 x i1> undef, i32 undef)
+  call <2 x i64> @llvm.abs.v2i64(<2 x i64> undef, i1 0)
+  call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> undef, i1 0, <4 x i1> undef, i32 undef)
+  call <4 x i64> @llvm.abs.v4i64(<4 x i64> undef, i1 0)
+  call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> undef, i1 0, <8 x i1> undef, i32 undef)
+  call <8 x i64> @llvm.abs.v8i64(<8 x i64> undef, i1 0)
+  call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> undef, i1 0, <16 x i1> undef, i32 undef)
+  call <16 x i64> @llvm.abs.v16i64(<16 x i64> undef, i1 0)
+  call <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8> undef, i1 0, <vscale x 2 x i1> undef, i32 undef)
+  call <vscale x 2 x i8> @llvm.abs.nxv2i8(<vscale x 2 x i8> undef, i1 0)
+  call <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8> undef, i1 0, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x i8> @llvm.abs.nxv4i8(<vscale x 4 x i8> undef, i1 0)
+  call <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8> undef, i1 0, <vscale x 8 x i1> undef, i32 undef)
+  call <vscale x 8 x i8> @llvm.abs.nxv8i8(<vscale x 8 x i8> undef, i1 0)
+  call <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8> undef, i1 0, <vscale x 16 x i1> undef, i32 undef)
+  call <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8> undef, i1 0)
+  call <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64> undef, i1 0, <vscale x 2 x i1> undef, i32 undef)
+  call <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64> undef, i1 0)
+  call <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64> undef, i1 0, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64> undef, i1 0)
+  call <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64> undef, i1 0, <vscale x 8 x i1> undef, i32 undef)
+  call <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64> undef, i1 0)
+  call <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64> undef, i1 0, <vscale x 16 x i1> undef, i32 undef)
+  call <vscale x 16 x i64> @llvm.abs.nxv16i64(<vscale x 16 x i64> undef, i1 0)
+  ret void
+}
+
+define void @load() {
+; CHECK-LABEL: 'load'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t0 = call <2 x i8> @llvm.vp.load.v2i8.p0(ptr undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t1 = load <2 x i8>, ptr undef, align 2
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t2 = call <4 x i8> @llvm.vp.load.v4i8.p0(ptr undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t3 = load <4 x i8>, ptr undef, align 4
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t4 = call <8 x i8> @llvm.vp.load.v8i8.p0(ptr undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t5 = load <8 x i8>, ptr undef, align 8
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t6 = call <16 x i8> @llvm.vp.load.v16i8.p0(ptr undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t7 = load <16 x i8>, ptr undef, align 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t8 = call <2 x i64> @llvm.vp.load.v2i64.p0(ptr undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t9 = load <2 x i64>, ptr undef, align 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t10 = call <4 x i64> @llvm.vp.load.v4i64.p0(ptr undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t12 = load <4 x i64>, ptr undef, align 32
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %t13 = call <8 x i64> @llvm.vp.load.v8i64.p0(ptr undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %t14 = load <8 x i64>, ptr undef, align 64
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %t15 = call <16 x i64> @llvm.vp.load.v16i64.p0(ptr undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %t16 = load <16 x i64>, ptr undef, align 128
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t17 = call <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0(ptr undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t18 = load <vscale x 2 x i8>, ptr undef, align 2
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t19 = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0(ptr undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t20 = load <vscale x 4 x i8>, ptr undef, align 4
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t21 = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0(ptr undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: %t22 = load <vscale x 8 x i8>, ptr undef, align 8
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t23 = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8.p0(ptr undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t24 = load <vscale x 16 x i8>, ptr undef, align 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t25 = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0(ptr undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: %t26 = load <vscale x 2 x i64>, ptr undef, align 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %t27 = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0(ptr undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %t28 = load <vscale x 4 x i64>, ptr undef, align 32
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %t29 = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0(ptr undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %t30 = load <vscale x 8 x i64>, ptr undef, align 64
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %t31 = call <vscale x 16 x i64> @llvm.vp.load.nxv16i64.p0(ptr undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: %t32 = load <vscale x 16 x i64>, ptr undef, align 128
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+  %t0 = call <2 x i8> @llvm.vp.load.v2i8(ptr undef, <2 x i1> undef, i32 undef)
+  %t1 = load <2 x i8>, ptr undef
+  %t2 = call <4 x i8> @llvm.vp.load.v4i8(ptr undef, <4 x i1> undef, i32 undef)
+  %t3 = load <4 x i8>, ptr undef
+  %t4 = call <8 x i8> @llvm.vp.load.v8i8(ptr undef, <8 x i1> undef, i32 undef)
+  %t5 = load <8 x i8>, ptr undef
+  %t6 = call <16 x i8> @llvm.vp.load.v16i8(ptr undef, <16 x i1> undef, i32 undef)
+  %t7 = load <16 x i8>, ptr undef
+  %t8 = call <2 x i64> @llvm.vp.load.v2i64(ptr undef, <2 x i1> undef, i32 undef)
+  %t9 = load <2 x i64>, ptr undef
+  %t10 = call <4 x i64> @llvm.vp.load.v4i64(ptr undef, <4 x i1> undef, i32 undef)
+  %t12 = load <4 x i64>, ptr undef
+  %t13 = call <8 x i64> @llvm.vp.load.v8i64(ptr undef, <8 x i1> undef, i32 undef)
+  %t14 = load <8 x i64>, ptr undef
+  %t15 = call <16 x i64> @llvm.vp.load.v16i64(ptr undef, <16 x i1> undef, i32 undef)
+  %t16 = load <16 x i64>, ptr undef
+  %t17 = call <vscale x 2 x i8> @llvm.vp.load.nxv2i8(ptr undef, <vscale x 2 x i1> undef, i32 undef)
+  %t18 = load <vscale x 2 x i8>, ptr undef
+  %t19 = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8(ptr undef, <vscale x 4 x i1> undef, i32 undef)
+  %t20 = load <vscale x 4 x i8>, ptr undef
+  %t21 = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8(ptr undef, <vscale x 8 x i1> undef, i32 undef)
+  %t22 = load <vscale x 8 x i8>, ptr undef
+  %t23 = call <vscale x 16 x i8> @llvm.vp.load.nxv16i8(ptr undef, <vscale x 16 x i1> undef, i32 undef)
+  %t24 = load <vscale x 16 x i8>, ptr undef
+  %t25 = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64(ptr undef, <vscale x 2 x i1> undef, i32 undef)
+  %t26 = load <vscale x 2 x i64>, ptr undef
+  %t27 = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64(ptr undef, <vscale x 4 x i1> undef, i32 undef)
+  %t28 = load <vscale x 4 x i64>, ptr undef
+  %t29 = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64(ptr undef, <vscale x 8 x i1> undef, i32 undef)
+  %t30 = load <vscale x 8 x i64>, ptr undef
+  %t31 = call <vscale x 16 x i64> @llvm.vp.load.nxv16i64(ptr undef, <vscale x 16 x i1> undef, i32 undef)
+  %t32 = load <vscale x 16 x i64>, ptr undef
+  ret void
+}
+
+define void @store() {
+; CHECK-LABEL: 'store'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.vp.store.v2i8.p0(<2 x i8> undef, ptr undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: store <2 x i8> undef, ptr undef, align 2
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.vp.store.v4i8.p0(<4 x i8> undef, ptr undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: store <4 x i8> undef, ptr undef, align 4
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.vp.store.v8i8.p0(<8 x i8> undef, ptr undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: store <8 x i8> undef, ptr undef, align 8
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.vp.store.v16i8.p0(<16 x i8> undef, ptr undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: store <16 x i8> undef, ptr undef, align 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.vp.store.v2i64.p0(<2 x i64> undef, ptr undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: store <2 x i64> undef, ptr undef, align 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @llvm.vp.store.v4i64.p0(<4 x i64> undef, ptr undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: store <4 x i64> undef, ptr undef, align 32
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.vp.store.v8i64.p0(<8 x i64> undef, ptr undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: store <8 x i64> undef, ptr undef, align 64
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.vp.store.v16i64.p0(<16 x i64> undef, ptr undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: store <16 x i64> undef, ptr undef, align 128
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.vp.store.nxv2i8.p0(<vscale x 2 x i8> undef, ptr undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: store <vscale x 2 x i8> undef, ptr undef, align 2
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.vp.store.nxv4i8.p0(<vscale x 4 x i8> undef, ptr undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: store <vscale x 4 x i8> undef, ptr undef, align 4
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: call void @llvm.vp.store.nxv8i8.p0(<vscale x 8 x i8> undef, ptr undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: store <vscale x 8 x i8> undef, ptr undef, align 8
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @llvm.vp.store.nxv16i8.p0(<vscale x 16 x i8> undef, ptr undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: store <vscale x 16 x i8> undef, ptr undef, align 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: call void @llvm.vp.store.nxv2i64.p0(<vscale x 2 x i64> undef, ptr undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 2 for instruction: store <vscale x 2 x i64> undef, ptr undef, align 16
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: call void @llvm.vp.store.nxv4i64.p0(<vscale x 4 x i64> undef, ptr undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: store <vscale x 4 x i64> undef, ptr undef, align 32
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: call void @llvm.vp.store.nxv8i64.p0(<vscale x 8 x i64> undef, ptr undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: store <vscale x 8 x i64> undef, ptr undef, align 64
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: call void @llvm.vp.store.nxv16i64.p0(<vscale x 16 x i64> undef, ptr undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 16 for instruction: store <vscale x 16 x i64> undef, ptr undef, align 128
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+  call void @llvm.vp.store.v2i8(<2 x i8> undef, ptr undef, <2 x i1> undef, i32 undef)
+  store <2 x i8> undef, ptr undef
+  call void @llvm.vp.store.v4i8(<4 x i8> undef, ptr undef, <4 x i1> undef, i32 undef)
+  store <4 x i8> undef, ptr undef
+  call void @llvm.vp.store.v8i8(<8 x i8> undef, ptr undef, <8 x i1> undef, i32 undef)
+  store <8 x i8> undef, ptr undef
+  call void @llvm.vp.store.v16i8(<16 x i8> undef, ptr undef, <16 x i1> undef, i32 undef)
+  store <16 x i8> undef, ptr undef
+  call void @llvm.vp.store.v2i64(<2 x i64> undef, ptr undef, <2 x i1> undef, i32 undef)
+  store <2 x i64> undef, ptr undef
+  call void @llvm.vp.store.v4i64(<4 x i64> undef, ptr undef, <4 x i1> undef, i32 undef)
+  store <4 x i64> undef, ptr undef
+  call void @llvm.vp.store.v8i64(<8 x i64> undef, ptr undef, <8 x i1> undef, i32 undef)
+  store <8 x i64> undef, ptr undef
+  call void @llvm.vp.store.v16i64(<16 x i64> undef, ptr undef, <16 x i1> undef, i32 undef)
+  store <16 x i64> undef, ptr undef
+  call void @llvm.vp.store.nxv2i8(<vscale x 2 x i8> undef, ptr undef, <vscale x 2 x i1> undef, i32 undef)
+  store <vscale x 2 x i8> undef, ptr undef
+  call void @llvm.vp.store.nxv4i8(<vscale x 4 x i8> undef, ptr undef, <vscale x 4 x i1> undef, i32 undef)
+  store <vscale x 4 x i8> undef, ptr undef
+  call void @llvm.vp.store.nxv8i8(<vscale x 8 x i8> undef, ptr undef, <vscale x 8 x i1> undef, i32 undef)
+  store <vscale x 8 x i8> undef, ptr undef
+  call void @llvm.vp.store.nxv16i8(<vscale x 16 x i8> undef, ptr undef, <vscale x 16 x i1> undef, i32 undef)
+  store <vscale x 16 x i8> undef, ptr undef
+  call void @llvm.vp.store.nxv2i64(<vscale x 2 x i64> undef, ptr undef, <vscale x 2 x i1> undef, i32 undef)
+  store <vscale x 2 x i64> undef, ptr undef
+  call void @llvm.vp.store.nxv4i64(<vscale x 4 x i64> undef, ptr undef, <vscale x 4 x i1> undef, i32 undef)
+  store <vscale x 4 x i64> undef, ptr undef
+  call void @llvm.vp.store.nxv8i64(<vscale x 8 x i64> undef, ptr undef, <vscale x 8 x i1> undef, i32 undef)
+  store <vscale x 8 x i64> undef, ptr undef
+  call void @llvm.vp.store.nxv16i64(<vscale x 16 x i64> undef, ptr undef, <vscale x 16 x i1> undef, i32 undef)
+  store <vscale x 16 x i64> undef, ptr undef
+  ret void
+}
+
+define void @reduce_add() {
+; CHECK-LABEL: 'reduce_add'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %1 = call i8 @llvm.vp.reduce.add.v2i8(i8 undef, <2 x i8> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %2 = call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %3 = call i8 @llvm.vp.reduce.add.v4i8(i8 undef, <4 x i8> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %4 = call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %5 = call i8 @llvm.vp.reduce.add.v8i8(i8 undef, <8 x i8> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %6 = call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %7 = call i8 @llvm.vp.reduce.add.v16i8(i8 undef, <16 x i8> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %8 = call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %9 = call i64 @llvm.vp.reduce.add.v2i64(i64 undef, <2 x i64> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 3 for instruction: %10 = call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %11 = call i64 @llvm.vp.reduce.add.v4i64(i64 undef, <4 x i64> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %12 = call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %13 = call i64 @llvm.vp.reduce.add.v8i64(i64 undef, <8 x i64> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %14 = call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %15 = call i64 @llvm.vp.reduce.add.v16i64(i64 undef, <16 x i64> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %17 = call i8 @llvm.vp.reduce.add.nxv8i8(i8 undef, <vscale x 8 x i8> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %18 = call i8 @llvm.vector.reduce.add.nxv2i8(<vscale x 2 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %19 = call i8 @llvm.vp.reduce.add.nxv4i8(i8 undef, <vscale x 4 x i8> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %20 = call i8 @llvm.vector.reduce.add.nxv4i8(<vscale x 4 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %21 = call i8 @llvm.vp.reduce.add.nxv8i8(i8 undef, <vscale x 8 x i8> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %22 = call i8 @llvm.vector.reduce.add.nxv8i8(<vscale x 8 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %23 = call i8 @llvm.vp.reduce.add.nxv16i8(i8 undef, <vscale x 16 x i8> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 7 for instruction: %24 = call i8 @llvm.vector.reduce.add.nxv16i8(<vscale x 16 x i8> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %25 = call i64 @llvm.vp.reduce.add.nxv2i64(i64 undef, <vscale x 2 x i64> undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %26 = call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %27 = call i64 @llvm.vp.reduce.add.nxv4i64(i64 undef, <vscale x 4 x i64> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 5 for instruction: %28 = call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %29 = call i64 @llvm.vp.reduce.add.nxv8i64(i64 undef, <vscale x 8 x i64> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %30 = call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %31 = call i64 @llvm.vp.reduce.add.nxv16i64(i64 undef, <vscale x 16 x i64> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 8 for instruction: %32 = call i64 @llvm.vector.reduce.add.nxv16i64(<vscale x 16 x i64> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+  call i8 @llvm.vp.reduce.add.v2i8(i8 undef, <2 x i8> undef, <2 x i1> undef, i32 undef)
+  call i8 @llvm.vector.reduce.add.v2i8(<2 x i8> undef)
+  call i8 @llvm.vp.reduce.add.v4i8(i8 undef, <4 x i8> undef, <4 x i1> undef, i32 undef)
+  call i8 @llvm.vector.reduce.add.v4i8(<4 x i8> undef)
+  call i8 @llvm.vp.reduce.add.v8i8(i8 undef, <8 x i8> undef, <8 x i1> undef, i32 undef)
+  call i8 @llvm.vector.reduce.add.v8i8(<8 x i8> undef)
+  call i8 @llvm.vp.reduce.add.v16i8(i8 undef, <16 x i8> undef, <16 x i1> undef, i32 undef)
+  call i8 @llvm.vector.reduce.add.v16i8(<16 x i8> undef)
+  call i64 @llvm.vp.reduce.add.v2i64(i64 undef, <2 x i64> undef, <2 x i1> undef, i32 undef)
+  call i64 @llvm.vector.reduce.add.v2i64(<2 x i64> undef)
+  call i64 @llvm.vp.reduce.add.v4i64(i64 undef, <4 x i64> undef, <4 x i1> undef, i32 undef)
+  call i64 @llvm.vector.reduce.add.v4i64(<4 x i64> undef)
+  call i64 @llvm.vp.reduce.add.v8i64(i64 undef, <8 x i64> undef, <8 x i1> undef, i32 undef)
+  call i64 @llvm.vector.reduce.add.v8i64(<8 x i64> undef)
+  call i64 @llvm.vp.reduce.add.v16i64(i64 undef, <16 x i64> undef, <16 x i1> undef, i32 undef)
+  call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
+  call i8 @llvm.vp.reduce.add.nxv8i8(i8 undef, <vscale x 8 x i8> undef, <vscale x 8 x i1> undef, i32 undef)
+  call i8 @llvm.vector.reduce.add.nxv2i8(<vscale x 2 x i8> undef)
+  call i8 @llvm.vp.reduce.add.nxv4i8(i8 undef, <vscale x 4 x i8> undef, <vscale x 4 x i1> undef, i32 undef)
+  call i8 @llvm.vector.reduce.add.nxv4i8(<vscale x 4 x i8> undef)
+  call i8 @llvm.vp.reduce.add.nxv8i8(i8 undef, <vscale x 8 x i8> undef, <vscale x 8 x i1> undef, i32 undef)
+  call i8 @llvm.vector.reduce.add.nxv8i8(<vscale x 8 x i8> undef)
+  call i8 @llvm.vp.reduce.add.nxv16i8(i8 undef, <vscale x 16 x i8> undef, <vscale x 16 x i1> undef, i32 undef)
+  call i8 @llvm.vector.reduce.add.nxv16i8(<vscale x 16 x i8> undef)
+  call i64 @llvm.vp.reduce.add.nxv2i64(i64 undef, <vscale x 2 x i64> undef, <vscale x 2 x i1> undef, i32 undef)
+  call i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64> undef)
+  call i64 @llvm.vp.reduce.add.nxv4i64(i64 undef, <vscale x 4 x i64> undef, <vscale x 4 x i1> undef, i32 undef)
+  call i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64> undef)
+  call i64 @llvm.vp.reduce.add.nxv8i64(i64 undef, <vscale x 8 x i64> undef, <vscale x 8 x i1> undef, i32 undef)
+  call i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64> undef)
+  call i64 @llvm.vp.reduce.add.nxv16i64(i64 undef, <vscale x 16 x i64> undef, <vscale x 16 x i1> undef, i32 undef)
+  call i64 @llvm.vector.reduce.add.nxv16i64(<vscale x 16 x i64> undef)
+  ret void
+}
+
+define void @reduce_fadd() {
+; CHECK-LABEL: 'reduce_fadd'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %1 = call float @llvm.vp.reduce.fadd.v2f32(float undef, <2 x float> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %2 = call float @llvm.vector.reduce.fadd.v2f32(float undef, <2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %3 = call float @llvm.vp.reduce.fadd.v4f32(float undef, <4 x float> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %4 = call float @llvm.vector.reduce.fadd.v4f32(float undef, <4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %5 = call float @llvm.vp.reduce.fadd.v8f32(float undef, <8 x float> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %6 = call float @llvm.vector.reduce.fadd.v8f32(float undef, <8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %7 = call float @llvm.vp.reduce.fadd.v16f32(float undef, <16 x float> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %8 = call float @llvm.vector.reduce.fadd.v16f32(float undef, <16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %9 = call double @llvm.vp.reduce.fadd.v2f64(double undef, <2 x double> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 4 for instruction: %10 = call double @llvm.vector.reduce.fadd.v2f64(double undef, <2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %11 = call double @llvm.vp.reduce.fadd.v4f64(double undef, <4 x double> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %12 = call double @llvm.vector.reduce.fadd.v4f64(double undef, <4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %13 = call double @llvm.vp.reduce.fadd.v8f64(double undef, <8 x double> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %14 = call double @llvm.vector.reduce.fadd.v8f64(double undef, <8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %15 = call double @llvm.vp.reduce.fadd.v16f64(double undef, <16 x double> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %16 = call double @llvm.vector.reduce.fadd.v16f64(double undef, <16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %17 = call float @llvm.vp.reduce.fadd.nxv2f32(float undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %18 = call float @llvm.vector.reduce.fadd.nxv2f32(float undef, <vscale x 2 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %19 = call float @llvm.vp.reduce.fadd.nxv4f32(float undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %20 = call float @llvm.vector.reduce.fadd.nxv4f32(float undef, <vscale x 4 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %21 = call float @llvm.vp.reduce.fadd.nxv8f32(float undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %22 = call float @llvm.vector.reduce.fadd.nxv8f32(float undef, <vscale x 8 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %23 = call float @llvm.vp.reduce.fadd.nxv16f32(float undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 34 for instruction: %24 = call float @llvm.vector.reduce.fadd.nxv16f32(float undef, <vscale x 16 x float> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %25 = call double @llvm.vp.reduce.fadd.nxv2f64(double undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 6 for instruction: %26 = call double @llvm.vector.reduce.fadd.nxv2f64(double undef, <vscale x 2 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %27 = call double @llvm.vp.reduce.fadd.nxv4f64(double undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 10 for instruction: %28 = call double @llvm.vector.reduce.fadd.nxv4f64(double undef, <vscale x 4 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %29 = call double @llvm.vp.reduce.fadd.nxv8f64(double undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 18 for instruction: %30 = call double @llvm.vector.reduce.fadd.nxv8f64(double undef, <vscale x 8 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %31 = call double @llvm.vp.reduce.fadd.nxv16f64(double undef, <vscale x 16 x double> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 35 for instruction: %32 = call double @llvm.vector.reduce.fadd.nxv16f64(double undef, <vscale x 16 x double> undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 0 for instruction: ret void
+;
+  call float @llvm.vp.reduce.fadd.v2f32(float undef, <2 x float> undef, <2 x i1> undef, i32 undef)
+  call float @llvm.vector.reduce.fadd.v2f32(float undef, <2 x float> undef)
+  call float @llvm.vp.reduce.fadd.v4f32(float undef, <4 x float> undef, <4 x i1> undef, i32 undef)
+  call float @llvm.vector.reduce.fadd.v4f32(float undef, <4 x float> undef)
+  call float @llvm.vp.reduce.fadd.v8f32(float undef, <8 x float> undef, <8 x i1> undef, i32 undef)
+  call float @llvm.vector.reduce.fadd.v8f32(float undef, <8 x float> undef)
+  call float @llvm.vp.reduce.fadd.v16f32(float undef, <16 x float> undef, <16 x i1> undef, i32 undef)
+  call float @llvm.vector.reduce.fadd.v16f32(float undef, <16 x float> undef)
+  call double @llvm.vp.reduce.fadd.v2f64(double undef, <2 x double> undef, <2 x i1> undef, i32 undef)
+  call double @llvm.vector.reduce.fadd.v2f64(double undef, <2 x double> undef)
+  call double @llvm.vp.reduce.fadd.v4f64(double undef, <4 x double> undef, <4 x i1> undef, i32 undef)
+  call double @llvm.vector.reduce.fadd.v4f64(double undef, <4 x double> undef)
+  call double @llvm.vp.reduce.fadd.v8f64(double undef, <8 x double> undef, <8 x i1> undef, i32 undef)
+  call double @llvm.vector.reduce.fadd.v8f64(double undef, <8 x double> undef)
+  call double @llvm.vp.reduce.fadd.v16f64(double undef, <16 x double> undef, <16 x i1> undef, i32 undef)
+  call double @llvm.vector.reduce.fadd.v16f64(double undef, <16 x double> undef)
+  call float @llvm.vp.reduce.fadd.nxv2f32(float undef, <vscale x 2 x float> undef, <vscale x 2 x i1> undef, i32 undef)
+  call float @llvm.vector.reduce.fadd.nxv2f32(float undef, <vscale x 2 x float> undef)
+  call float @llvm.vp.reduce.fadd.nxv4f32(float undef, <vscale x 4 x float> undef, <vscale x 4 x i1> undef, i32 undef)
+  call float @llvm.vector.reduce.fadd.nxv4f32(float undef, <vscale x 4 x float> undef)
+  call float @llvm.vp.reduce.fadd.nxv8f32(float undef, <vscale x 8 x float> undef, <vscale x 8 x i1> undef, i32 undef)
+  call float @llvm.vector.reduce.fadd.nxv8f32(float undef, <vscale x 8 x float> undef)
+  call float @llvm.vp.reduce.fadd.nxv16f32(float undef, <vscale x 16 x float> undef, <vscale x 16 x i1> undef, i32 undef)
+  call float @llvm.vector.reduce.fadd.nxv16f32(float undef, <vscale x 16 x float> undef)
+  call double @llvm.vp.reduce.fadd.nxv2f64(double undef, <vscale x 2 x double> undef, <vscale x 2 x i1> undef, i32 undef)
+  call double @llvm.vector.reduce.fadd.nxv2f64(double undef, <vscale x 2 x double> undef)
+  call double @llvm.vp.reduce.fadd.nxv4f64(double undef, <vscale x 4 x double> undef, <vscale x 4 x i1> undef, i32 undef)
+  call double @llvm.vector.reduce.fadd.nxv4f64(double undef, <vscale x 4 x double> undef)
+  call double @llvm.vp.reduce.fadd.nxv8f64(double undef, <vscale x 8 x double> undef, <vscale x 8 x i1> undef, i32 undef)
+  call double @llvm.vector.reduce.fadd.nxv8f64(double undef, <vscale x 8 x double> undef)
+  call double @llvm.vp.reduce.fadd.nxv16f64(double undef, <vscale x 16 x double> undef, <vscale x 16 x i1> undef, i32 undef)
+  call double @llvm.vector.reduce.fadd.nxv16f64(double undef, <vscale x 16 x double> undef)
+  ret void
+}
+
+declare <2 x i8> @llvm.vp.add.v2i8(<2 x i8>, <2 x i8>, <2 x i1>, i32)
+declare <4 x i8> @llvm.vp.add.v4i8(<4 x i8>, <4 x i8>, <4 x i1>, i32)
+declare <8 x i8> @llvm.vp.add.v8i8(<8 x i8>, <8 x i8>, <8 x i1>, i32)
+declare <16 x i8> @llvm.vp.add.v16i8(<16 x i8>, <16 x i8>, <16 x i1>, i32)
+declare <2 x i64> @llvm.vp.add.v2i64(<2 x i64>, <2 x i64>, <2 x i1>, i32)
+declare <4 x i64> @llvm.vp.add.v4i64(<4 x i64>, <4 x i64>, <4 x i1>, i32)
+declare <8 x i64> @llvm.vp.add.v8i64(<8 x i64>, <8 x i64>, <8 x i1>, i32)
+declare <16 x i64> @llvm.vp.add.v16i64(<16 x i64>, <16 x i64>, <16 x i1>, i32)
+declare <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8>, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8>, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8>, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+declare <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64>, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64>, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i64> @llvm.vp.add.nxv16i64(<vscale x 16 x i64>, <vscale x 16 x i64>, <vscale x 16 x i1>, i32)
+
+declare <2 x i8> @llvm.vp.abs.v2i8(<2 x i8>, i1, <2 x i1>, i32)
+declare <4 x i8> @llvm.vp.abs.v4i8(<4 x i8>, i1, <4 x i1>, i32)
+declare <8 x i8> @llvm.vp.abs.v8i8(<8 x i8>, i1, <8 x i1>, i32)
+declare <16 x i8> @llvm.vp.abs.v16i8(<16 x i8>, i1, <16 x i1>, i32)
+declare <2 x i64> @llvm.vp.abs.v2i64(<2 x i64>, i1, <2 x i1>, i32)
+declare <4 x i64> @llvm.vp.abs.v4i64(<4 x i64>, i1, <4 x i1>, i32)
+declare <8 x i64> @llvm.vp.abs.v8i64(<8 x i64>, i1, <8 x i1>, i32)
+declare <16 x i64> @llvm.vp.abs.v16i64(<16 x i64>, i1, <16 x i1>, i32)
+declare <vscale x 2 x i8> @llvm.vp.abs.nxv2i8(<vscale x 2 x i8>, i1, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i8> @llvm.vp.abs.nxv4i8(<vscale x 4 x i8>, i1, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i8> @llvm.vp.abs.nxv8i8(<vscale x 8 x i8>, i1, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i8> @llvm.vp.abs.nxv16i8(<vscale x 16 x i8>, i1, <vscale x 16 x i1>, i32)
+declare <vscale x 2 x i64> @llvm.vp.abs.nxv2i64(<vscale x 2 x i64>, i1, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i64> @llvm.vp.abs.nxv4i64(<vscale x 4 x i64>, i1, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i64> @llvm.vp.abs.nxv8i64(<vscale x 8 x i64>, i1, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i64> @llvm.vp.abs.nxv16i64(<vscale x 16 x i64>, i1, <vscale x 16 x i1>, i32)
+
+declare <2 x i8> @llvm.abs.v2i8(<2 x i8>, i1)
+declare <4 x i8> @llvm.abs.v4i8(<4 x i8>, i1)
+declare <8 x i8> @llvm.abs.v8i8(<8 x i8>, i1)
+declare <16 x i8> @llvm.abs.v16i8(<16 x i8>, i1)
+declare <2 x i64> @llvm.abs.v2i64(<2 x i64>, i1)
+declare <4 x i64> @llvm.abs.v4i64(<4 x i64>, i1)
+declare <8 x i64> @llvm.abs.v8i64(<8 x i64>, i1)
+declare <16 x i64> @llvm.abs.v16i64(<16 x i64>, i1)
+declare <vscale x 2 x i8> @llvm.abs.nxv2i8(<vscale x 2 x i8>, i1)
+declare <vscale x 4 x i8> @llvm.abs.nxv4i8(<vscale x 4 x i8>, i1)
+declare <vscale x 8 x i8> @llvm.abs.nxv8i8(<vscale x 8 x i8>, i1)
+declare <vscale x 16 x i8> @llvm.abs.nxv16i8(<vscale x 16 x i8>, i1)
+declare <vscale x 2 x i64> @llvm.abs.nxv2i64(<vscale x 2 x i64>, i1)
+declare <vscale x 4 x i64> @llvm.abs.nxv4i64(<vscale x 4 x i64>, i1)
+declare <vscale x 8 x i64> @llvm.abs.nxv8i64(<vscale x 8 x i64>, i1)
+declare <vscale x 16 x i64> @llvm.abs.nxv16i64(<vscale x 16 x i64>, i1)
+
+declare <2 x i8> @llvm.vp.load.v2i8(ptr, <2 x i1>, i32)
+declare <4 x i8> @llvm.vp.load.v4i8(ptr, <4 x i1>, i32)
+declare <8 x i8> @llvm.vp.load.v8i8(ptr, <8 x i1>, i32)
+declare <16 x i8> @llvm.vp.load.v16i8(ptr, <16 x i1>, i32)
+declare <2 x i64> @llvm.vp.load.v2i64(ptr, <2 x i1>, i32)
+declare <4 x i64> @llvm.vp.load.v4i64(ptr, <4 x i1>, i32)
+declare <8 x i64> @llvm.vp.load.v8i64(ptr, <8 x i1>, i32)
+declare <16 x i64> @llvm.vp.load.v16i64(ptr, <16 x i1>, i32)
+declare <vscale x 2 x i8> @llvm.vp.load.nxv2i8(ptr, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i8> @llvm.vp.load.nxv4i8(ptr, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i8> @llvm.vp.load.nxv8i8(ptr, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i8> @llvm.vp.load.nxv16i8(ptr, <vscale x 16 x i1>, i32)
+declare <vscale x 2 x i64> @llvm.vp.load.nxv2i64(ptr, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i64> @llvm.vp.load.nxv4i64(ptr, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i64> @llvm.vp.load.nxv8i64(ptr, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i64> @llvm.vp.load.nxv16i64(ptr, <vscale x 16 x i1>, i32)
+
+declare void @llvm.vp.store.v2i8(<2 x i8>, ptr, <2 x i1>, i32)
+declare void @llvm.vp.store.v4i8(<4 x i8>, ptr, <4 x i1>, i32)
+declare void @llvm.vp.store.v8i8(<8 x i8>, ptr, <8 x i1>, i32)
+declare void @llvm.vp.store.v16i8(<16 x i8>, ptr, <16 x i1>, i32)
+declare void @llvm.vp.store.v2i64(<2 x i64>, ptr, <2 x i1>, i32)
+declare void @llvm.vp.store.v4i64(<4 x i64>, ptr, <4 x i1>, i32)
+declare void @llvm.vp.store.v8i64(<8 x i64>, ptr, <8 x i1>, i32)
+declare void @llvm.vp.store.v16i64(<16 x i64>, ptr, <16 x i1>, i32)
+declare void @llvm.vp.store.nxv2i8(<vscale x 2 x i8>, ptr, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv4i8(<vscale x 4 x i8>, ptr, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv8i8(<vscale x 8 x i8>, ptr, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv16i8(<vscale x 16 x i8>, ptr, <vscale x 16 x i1>, i32)
+declare void @llvm.vp.store.nxv2i64(<vscale x 2 x i64>, ptr, <vscale x 2 x i1>, i32)
+declare void @llvm.vp.store.nxv4i64(<vscale x 4 x i64>, ptr, <vscale x 4 x i1>, i32)
+declare void @llvm.vp.store.nxv8i64(<vscale x 8 x i64>, ptr, <vscale x 8 x i1>, i32)
+declare void @llvm.vp.store.nxv16i64(<vscale x 16 x i64>, ptr, <vscale x 16 x i1>, i32)
+
+declare i8 @llvm.vector.reduce.add.v2i8(<2 x i8>)
+declare i8 @llvm.vector.reduce.add.v4i8(<4 x i8>)
+declare i8 @llvm.vector.reduce.add.v8i8(<8 x i8>)
+declare i8 @llvm.vector.reduce.add.v16i8(<16 x i8>)
+declare i64 @llvm.vector.reduce.add.v2i64(<2 x i64>)
+declare i64 @llvm.vector.reduce.add.v4i64(<4 x i64>)
+declare i64 @llvm.vector.reduce.add.v8i64(<8 x i64>)
+declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
+declare i8 @llvm.vector.reduce.add.nxv2i8(<vscale x 2 x i8>)
+declare i8 @llvm.vector.reduce.add.nxv4i8(<vscale x 4 x i8>)
+declare i8 @llvm.vector.reduce.add.nxv8i8(<vscale x 8 x i8>)
+declare i8 @llvm.vector.reduce.add.nxv16i8(<vscale x 16 x i8>)
+declare i64 @llvm.vector.reduce.add.nxv2i64(<vscale x 2 x i64>)
+declare i64 @llvm.vector.reduce.add.nxv4i64(<vscale x 4 x i64>)
+declare i64 @llvm.vector.reduce.add.nxv8i64(<vscale x 8 x i64>)
+declare i64 @llvm.vector.reduce.add.nxv16i64(<vscale x 16 x i64>)
+
+declare i8 @llvm.vp.reduce.add.v2i8(i8, <2 x i8>, <2 x i1>, i32)
+declare i8 @llvm.vp.reduce.add.v4i8(i8, <4 x i8>, <4 x i1>, i32)
+declare i8 @llvm.vp.reduce.add.v8i8(i8, <8 x i8>, <8 x i1>, i32)
+declare i8 @llvm.vp.reduce.add.v16i8(i8, <16 x i8>, <16 x i1>, i32)
+declare i64 @llvm.vp.reduce.add.v2i64(i64, <2 x i64>, <2 x i1>, i32)
+declare i64 @llvm.vp.reduce.add.v4i64(i64, <4 x i64>, <4 x i1>, i32)
+declare i64 @llvm.vp.reduce.add.v8i64(i64, <8 x i64>, <8 x i1>, i32)
+declare i64 @llvm.vp.reduce.add.v16i64(i64, <16 x i64>, <16 x i1>, i32)
+declare i8 @llvm.vp.reduce.add.nxv2i8(i8, <vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+declare i8 @llvm.vp.reduce.add.nxv4i8(i8, <vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+declare i8 @llvm.vp.reduce.add.nxv8i8(i8, <vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+declare i8 @llvm.vp.reduce.add.nxv16i8(i8, <vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+declare i64 @llvm.vp.reduce.add.nxv2i64(i64, <vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+declare i64 @llvm.vp.reduce.add.nxv4i64(i64, <vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+declare i64 @llvm.vp.reduce.add.nxv8i64(i64, <vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+declare i64 @llvm.vp.reduce.add.nxv16i64(i64, <vscale x 16 x i64>, <vscale x 16 x i1>, i32)
+
+declare float @llvm.vector.reduce.fadd.v2f32(float, <2 x float>)
+declare float @llvm.vector.reduce.fadd.v4f32(float, <4 x float>)
+declare float @llvm.vector.reduce.fadd.v8f32(float, <8 x float>)
+declare float @llvm.vector.reduce.fadd.v16f32(float, <16 x float>)
+declare double @llvm.vector.reduce.fadd.v2f64(double, <2 x double>)
+declare double @llvm.vector.reduce.fadd.v4f64(double, <4 x double>)
+declare double @llvm.vector.reduce.fadd.v8f64(double, <8 x double>)
+declare double @llvm.vector.reduce.fadd.v16f64(double, <16 x double>)
+declare float @llvm.vector.reduce.fadd.nxv2f32(float, <vscale x 2 x float>)
+declare float @llvm.vector.reduce.fadd.nxv4f32(float, <vscale x 4 x float>)
+declare float @llvm.vector.reduce.fadd.nxv8f32(float, <vscale x 8 x float>)
+declare float @llvm.vector.reduce.fadd.nxv16f32(float, <vscale x 16 x float>)
+declare double @llvm.vector.reduce.fadd.nxv2f64(double, <vscale x 2 x double>)
+declare double @llvm.vector.reduce.fadd.nxv4f64(double, <vscale x 4 x double>)
+declare double @llvm.vector.reduce.fadd.nxv8f64(double, <vscale x 8 x double>)
+declare double @llvm.vector.reduce.fadd.nxv16f64(double, <vscale x 16 x double>)
+
+declare float @llvm.vp.reduce.fadd.v2f32(float, <2 x float>, <2 x i1>, i32)
+declare float @llvm.vp.reduce.fadd.v4f32(float, <4 x float>, <4 x i1>, i32)
+declare float @llvm.vp.reduce.fadd.v8f32(float, <8 x float>, <8 x i1>, i32)
+declare float @llvm.vp.reduce.fadd.v16f32(float, <16 x float>, <16 x i1>, i32)
+declare double @llvm.vp.reduce.fadd.v2f64(double, <2 x double>, <2 x i1>, i32)
+declare double @llvm.vp.reduce.fadd.v4f64(double, <4 x double>, <4 x i1>, i32)
+declare double @llvm.vp.reduce.fadd.v8f64(double, <8 x double>, <8 x i1>, i32)
+declare double @llvm.vp.reduce.fadd.v16f64(double, <16 x double>, <16 x i1>, i32)
+declare float @llvm.vp.reduce.fadd.nxv2f32(float, <vscale x 2 x float>, <vscale x 2 x i1>, i32)
+declare float @llvm.vp.reduce.fadd.nxv4f32(float, <vscale x 4 x float>, <vscale x 4 x i1>, i32)
+declare float @llvm.vp.reduce.fadd.nxv8f32(float, <vscale x 8 x float>, <vscale x 8 x i1>, i32)
+declare float @llvm.vp.reduce.fadd.nxv16f32(float, <vscale x 16 x float>, <vscale x 16 x i1>, i32)
+declare double @llvm.vp.reduce.fadd.nxv2f64(double, <vscale x 2 x double>, <vscale x 2 x i1>, i32)
+declare double @llvm.vp.reduce.fadd.nxv4f64(double, <vscale x 4 x double>, <vscale x 4 x i1>, i32)
+declare double @llvm.vp.reduce.fadd.nxv8f64(double, <vscale x 8 x double>, <vscale x 8 x i1>, i32)
+declare double @llvm.vp.reduce.fadd.nxv16f64(double, <vscale x 16 x double>, <vscale x 16 x i1>, i32)
+
 declare <vscale x 4 x i32> @llvm.fshr.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
 declare <vscale x 4 x i32> @llvm.fshl.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, <vscale x 4 x i32> %c)
-
 declare <vscale x 4 x float> @llvm.pow.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>)
 declare <vscale x 4 x float> @llvm.powi.nxv4f32.i32(<vscale x 4 x float>, i32)
 declare <vscale x 4 x float> @llvm.nearbyint.nxv4f32(<vscale x 4 x float>)
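
Not part of the patch itself, but a note for reviewers: the BasicTTIImpl.h hunk above routes VP intrinsics to the cost hooks of their functional counterparts (getMemoryOpCost for vp.load/vp.store, getArithmeticInstrCost for VP binops, and a rebuilt IntrinsicCostAttributes for everything with a functional intrinsic ID). Below is a minimal C++ sketch of how that mapping could be sanity-checked programmatically through the public TargetTransformInfo interface. The helper name `checkVPAddCost` and the setup it assumes (a `TargetTransformInfo` obtained for a riscv64 function compiled with `+v`) are illustrative assumptions, not code from this change.

```cpp
// Sketch only: compare the cost of llvm.vp.add against a plain add through
// the public TTI interface. Assumes `TTI` was obtained for a riscv64+v
// function via the usual TargetIRAnalysis machinery.
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include <cassert>

using namespace llvm;

static void checkVPAddCost(const TargetTransformInfo &TTI, LLVMContext &Ctx) {
  // <vscale x 2 x i64> data type and its <vscale x 2 x i1> mask type.
  auto *VecTy = VectorType::get(Type::getInt64Ty(Ctx),
                                ElementCount::getScalable(2));
  auto *MaskTy = VectorType::get(Type::getInt1Ty(Ctx),
                                 ElementCount::getScalable(2));

  // Cost of a plain `add` on the same type.
  InstructionCost AddCost = TTI.getArithmeticInstrCost(
      Instruction::Add, VecTy, TargetTransformInfo::TCK_RecipThroughput);

  // Cost of `llvm.vp.add` on the same type. With this patch, the VP binop
  // is mapped onto getArithmeticInstrCost, so the two queries should agree.
  IntrinsicCostAttributes ICA(Intrinsic::vp_add, VecTy,
                              {VecTy, VecTy, MaskTy, Type::getInt32Ty(Ctx)});
  InstructionCost VPCost = TTI.getIntrinsicInstrCost(
      ICA, TargetTransformInfo::TCK_RecipThroughput);

  assert(VPCost == AddCost && "vp.add should now cost the same as add");
}
```

The lit tests above remain the authoritative check; after rebasing, their CHECK lines can be regenerated with llvm/utils/update_analyze_test_checks.py.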