diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index 7f3c2be90d820..42a5fbec95174 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -840,6 +840,10 @@ def vector_insert_subvec : SDNode<"ISD::INSERT_SUBVECTOR",
 def extract_subvector : SDNode<"ISD::EXTRACT_SUBVECTOR", SDTSubVecExtract, []>;
 def insert_subvector : SDNode<"ISD::INSERT_SUBVECTOR", SDTSubVecInsert, []>;
 
+def find_last_active
+    : SDNode<"ISD::VECTOR_FIND_LAST_ACTIVE",
+             SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisVec<1>]>, []>;
+
 // Nodes for intrinsics, you should use the intrinsic itself and let tblgen use
 // these internally. Don't reference these directly.
 def intrinsic_void : SDNode<"ISD::INTRINSIC_VOID",
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index 8617377ffc55b..8b8299f9e4911 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -1452,6 +1452,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
       setOperationAction(ISD::VECTOR_DEINTERLEAVE, VT, Custom);
       setOperationAction(ISD::VECTOR_INTERLEAVE, VT, Custom);
     }
+    for (auto VT : {MVT::nxv16i1, MVT::nxv8i1, MVT::nxv4i1, MVT::nxv2i1})
+      setOperationAction(ISD::VECTOR_FIND_LAST_ACTIVE, VT, Legal);
   }
 
   if (Subtarget->isSVEorStreamingSVEAvailable()) {
@@ -19730,6 +19732,33 @@ performLastTrueTestVectorCombine(SDNode *N,
   return getPTest(DAG, N->getValueType(0), Pg, N0, AArch64CC::LAST_ACTIVE);
 }
 
+static SDValue
+performExtractLastActiveCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+                                const AArch64Subtarget *Subtarget) {
+  assert(N->getOpcode() == ISD::EXTRACT_VECTOR_ELT);
+  SelectionDAG &DAG = DCI.DAG;
+  SDValue Vec = N->getOperand(0);
+  SDValue Idx = N->getOperand(1);
+
+  if (DCI.isBeforeLegalize() || Idx.getOpcode() != ISD::VECTOR_FIND_LAST_ACTIVE)
+    return SDValue();
+
+  // Only legal for 8, 16, 32, and 64 bit element types.
+  EVT EltVT = Vec.getValueType().getVectorElementType();
+  if (!is_contained(ArrayRef({MVT::i8, MVT::i16, MVT::i32, MVT::i64, MVT::f16,
+                              MVT::bf16, MVT::f32, MVT::f64}),
+                    EltVT.getSimpleVT().SimpleTy))
+    return SDValue();
+
+  SDValue Mask = Idx.getOperand(0);
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  if (!TLI.isOperationLegal(ISD::VECTOR_FIND_LAST_ACTIVE, Mask.getValueType()))
+    return SDValue();
+
+  return DAG.getNode(AArch64ISD::LASTB, SDLoc(N), N->getValueType(0), Mask,
+                     Vec);
+}
+
 static SDValue
 performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
                                const AArch64Subtarget *Subtarget) {
@@ -19738,6 +19767,8 @@ performExtractVectorEltCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
     return Res;
   if (SDValue Res = performLastTrueTestVectorCombine(N, DCI, Subtarget))
     return Res;
+  if (SDValue Res = performExtractLastActiveCombine(N, DCI, Subtarget))
+    return Res;
 
   SelectionDAG &DAG = DCI.DAG;
   SDValue N0 = N->getOperand(0), N1 = N->getOperand(1);
@@ -24852,6 +24883,39 @@ static SDValue reassociateCSELOperandsForCSE(SDNode *N, SelectionDAG &DAG) {
   }
 }
 
+static SDValue foldCSELofLASTB(SDNode *Op, SelectionDAG &DAG) {
+  AArch64CC::CondCode OpCC =
+      static_cast<AArch64CC::CondCode>(Op->getConstantOperandVal(2));
+
+  if (OpCC != AArch64CC::NE)
+    return SDValue();
+
+  SDValue PTest = Op->getOperand(3);
+  if (PTest.getOpcode() != AArch64ISD::PTEST_ANY)
+    return SDValue();
+
+  SDValue TruePred = PTest.getOperand(0);
+  SDValue AnyPred = PTest.getOperand(1);
+
+  if (TruePred.getOpcode() == AArch64ISD::REINTERPRET_CAST)
+    TruePred = TruePred.getOperand(0);
+
+  if (AnyPred.getOpcode() == AArch64ISD::REINTERPRET_CAST)
+    AnyPred = AnyPred.getOperand(0);
+
+  if (TruePred != AnyPred && TruePred.getOpcode() != AArch64ISD::PTRUE)
+    return SDValue();
+
+  SDValue LastB = Op->getOperand(0);
+  SDValue Default = Op->getOperand(1);
+
+  if (LastB.getOpcode() != AArch64ISD::LASTB || LastB.getOperand(0) != AnyPred)
+    return SDValue();
+
+  return DAG.getNode(AArch64ISD::CLASTB_N, SDLoc(Op), Op->getValueType(0),
+                     AnyPred, Default, LastB.getOperand(1));
+}
+
 // Optimize CSEL instructions
 static SDValue performCSELCombine(SDNode *N,
                                   TargetLowering::DAGCombinerInfo &DCI,
@@ -24897,6 +24961,10 @@ static SDValue performCSELCombine(SDNode *N,
     }
   }
 
+  // CSEL (LASTB P, Z), X, NE(ANY P) -> CLASTB P, X, Z
+  if (SDValue CondLast = foldCSELofLASTB(N, DAG))
+    return CondLast;
+
   return performCONDCombine(N, DCI, DAG, 2, 3);
 }
 
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
index 524fccb8d43e6..28aecd14e33fa 100644
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -3379,6 +3379,20 @@ let Predicates = [HasSVE_or_SME] in {
   def : Pat<(i64 (vector_extract nxv2i64:$vec, VectorIndexD:$index)),
             (UMOVvi64 (v2i64 (EXTRACT_SUBREG ZPR:$vec, zsub)), VectorIndexD:$index)>;
 
+  // Find index of last active lane. This is a fallback in case we miss the
+  // opportunity to fold into a lastb or clastb directly.
+  def : Pat<(i64(find_last_active nxv16i1:$P1)),
+            (INSERT_SUBREG(IMPLICIT_DEF), (LASTB_RPZ_B $P1, (INDEX_II_B 0, 1)),
+                          sub_32)>;
+  def : Pat<(i64(find_last_active nxv8i1:$P1)),
+            (INSERT_SUBREG(IMPLICIT_DEF), (LASTB_RPZ_H $P1, (INDEX_II_H 0, 1)),
+                          sub_32)>;
+  def : Pat<(i64(find_last_active nxv4i1:$P1)),
+            (INSERT_SUBREG(IMPLICIT_DEF), (LASTB_RPZ_S $P1, (INDEX_II_S 0, 1)),
+                          sub_32)>;
+  def : Pat<(i64(find_last_active nxv2i1:$P1)), (LASTB_RPZ_D $P1, (INDEX_II_D 0,
+                                                                   1))>;
+
   // Move element from the bottom 128-bits of a scalable vector to a single-element vector.
   // Alternative case where insertelement is just scalar_to_vector rather than vector_insert.
   def : Pat<(v1f64 (scalar_to_vector
diff --git a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
index 3b11e67d072e7..bb9a09ca3cc80 100644
--- a/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
+++ b/llvm/test/CodeGen/AArch64/vector-extract-last-active.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 3
-; RUN: llc -mtriple=aarch64 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,NEON-FIXED
-; RUN: llc -mtriple=aarch64 -mattr=+sve -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,SVE-FIXED
+; RUN: llc -mtriple=aarch64 -mattr=+bf16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,NEON-FIXED
+; RUN: llc -mtriple=aarch64 -mattr=+sve,+bf16 -verify-machineinstrs %s -o - | FileCheck %s --check-prefixes=CHECK,SVE-FIXED
 
 define i8 @extract_last_i8(<16 x i8> %data, <16 x i8> %mask, i8 %passthru) {
 ; NEON-FIXED-LABEL: extract_last_i8:
@@ -194,15 +194,115 @@ define i64 @extract_last_i64(<2 x i64> %data, <2 x i64> %mask, i64 %passthru) {
   ret i64 %res
 }
 
+define half @extract_last_half(<8 x half> %data, <8 x i16> %mask, half %passthru) {
+; NEON-FIXED-LABEL: extract_last_half:
+; NEON-FIXED:       // %bb.0:
+; NEON-FIXED-NEXT:    sub sp, sp, #16
+; NEON-FIXED-NEXT:    .cfi_def_cfa_offset 16
+; NEON-FIXED-NEXT:    cmtst v1.8h, v1.8h, v1.8h
+; NEON-FIXED-NEXT:    adrp x8, .LCPI4_0
+; NEON-FIXED-NEXT:    mov x9, sp
+; NEON-FIXED-NEXT:    ldr d4, [x8, :lo12:.LCPI4_0]
+; NEON-FIXED-NEXT:    str q0, [sp]
+; NEON-FIXED-NEXT:    // kill: def $h2 killed $h2 def $s2
+; NEON-FIXED-NEXT:    xtn v3.8b, v1.8h
+; NEON-FIXED-NEXT:    umaxv h1, v1.8h
+; NEON-FIXED-NEXT:    and v3.8b, v3.8b, v4.8b
+; NEON-FIXED-NEXT:    umaxv b3, v3.8b
+; NEON-FIXED-NEXT:    fmov w8, s3
+; NEON-FIXED-NEXT:    bfi x9, x8, #1, #3
+; NEON-FIXED-NEXT:    fmov w8, s1
+; NEON-FIXED-NEXT:    ldr h0, [x9]
+; NEON-FIXED-NEXT:    tst w8, #0x1
+; NEON-FIXED-NEXT:    fcsel s0, s0, s2, ne
+; NEON-FIXED-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; NEON-FIXED-NEXT:    add sp, sp, #16
+; NEON-FIXED-NEXT:    ret
+;
+; SVE-FIXED-LABEL: extract_last_half:
+; SVE-FIXED:       // %bb.0:
+; SVE-FIXED-NEXT:    sub sp, sp, #16
+; SVE-FIXED-NEXT:    .cfi_def_cfa_offset 16
+; SVE-FIXED-NEXT:    cmtst v1.8h, v1.8h, v1.8h
+; SVE-FIXED-NEXT:    index z4.b, #0, #1
+; SVE-FIXED-NEXT:    mov x9, sp
+; SVE-FIXED-NEXT:    str q0, [sp]
+; SVE-FIXED-NEXT:    xtn v3.8b, v1.8h
+; SVE-FIXED-NEXT:    umaxv h1, v1.8h
+; SVE-FIXED-NEXT:    and v3.8b, v3.8b, v4.8b
+; SVE-FIXED-NEXT:    umaxv b3, v3.8b
+; SVE-FIXED-NEXT:    fmov w8, s3
+; SVE-FIXED-NEXT:    bfi x9, x8, #1, #3
+; SVE-FIXED-NEXT:    fmov w8, s1
+; SVE-FIXED-NEXT:    ldr h0, [x9]
+; SVE-FIXED-NEXT:    tst w8, #0x1
+; SVE-FIXED-NEXT:    fcsel h0, h0, h2, ne
+; SVE-FIXED-NEXT:    add sp, sp, #16
+; SVE-FIXED-NEXT:    ret
+  %notzero = icmp ne <8 x i16> %mask, zeroinitializer
+  %res = call half @llvm.experimental.vector.extract.last.active.v8f16(<8 x half> %data, <8 x i1> %notzero, half %passthru)
+  ret half %res
+}
+
+define bfloat @extract_last_bfloat(<8 x bfloat> %data, <8 x i16> %mask, bfloat %passthru) {
+; NEON-FIXED-LABEL: extract_last_bfloat:
+; NEON-FIXED:       // %bb.0:
+; NEON-FIXED-NEXT:    sub sp, sp, #16
+; NEON-FIXED-NEXT:    .cfi_def_cfa_offset 16
+; NEON-FIXED-NEXT:    cmtst v1.8h, v1.8h, v1.8h
+; NEON-FIXED-NEXT:    adrp x8, .LCPI5_0
+; NEON-FIXED-NEXT:    mov x9, sp
+; NEON-FIXED-NEXT:    ldr d4, [x8, :lo12:.LCPI5_0]
+; NEON-FIXED-NEXT:    str q0, [sp]
+; NEON-FIXED-NEXT:    // kill: def $h2 killed $h2 def $s2
+; NEON-FIXED-NEXT:    xtn v3.8b, v1.8h
+; NEON-FIXED-NEXT:    umaxv h1, v1.8h
+; NEON-FIXED-NEXT:    and v3.8b, v3.8b, v4.8b
+; NEON-FIXED-NEXT:    umaxv b3, v3.8b
+; NEON-FIXED-NEXT:    fmov w8, s3
+; NEON-FIXED-NEXT:    bfi x9, x8, #1, #3
+; NEON-FIXED-NEXT:    fmov w8, s1
+; NEON-FIXED-NEXT:    ldr h0, [x9]
+; NEON-FIXED-NEXT:    tst w8, #0x1
+; NEON-FIXED-NEXT:    fcsel s0, s0, s2, ne
+; NEON-FIXED-NEXT:    // kill: def $h0 killed $h0 killed $s0
+; NEON-FIXED-NEXT:    add sp, sp, #16
+; NEON-FIXED-NEXT:    ret
+;
+; SVE-FIXED-LABEL: extract_last_bfloat:
+; SVE-FIXED:       // %bb.0:
+; SVE-FIXED-NEXT:    sub sp, sp, #16
+; SVE-FIXED-NEXT:    .cfi_def_cfa_offset 16
+; SVE-FIXED-NEXT:    cmtst v1.8h, v1.8h, v1.8h
+; SVE-FIXED-NEXT:    index z4.b, #0, #1
+; SVE-FIXED-NEXT:    mov x9, sp
+; SVE-FIXED-NEXT:    str q0, [sp]
+; SVE-FIXED-NEXT:    xtn v3.8b, v1.8h
+; SVE-FIXED-NEXT:    umaxv h1, v1.8h
+; SVE-FIXED-NEXT:    and v3.8b, v3.8b, v4.8b
+; SVE-FIXED-NEXT:    umaxv b3, v3.8b
+; SVE-FIXED-NEXT:    fmov w8, s3
+; SVE-FIXED-NEXT:    bfi x9, x8, #1, #3
+; SVE-FIXED-NEXT:    fmov w8, s1
+; SVE-FIXED-NEXT:    ldr h0, [x9]
+; SVE-FIXED-NEXT:    tst w8, #0x1
+; SVE-FIXED-NEXT:    fcsel h0, h0, h2, ne
+; SVE-FIXED-NEXT:    add sp, sp, #16
+; SVE-FIXED-NEXT:    ret
+  %notzero = icmp ne <8 x i16> %mask, zeroinitializer
+  %res = call bfloat @llvm.experimental.vector.extract.last.active.v8bf16(<8 x bfloat> %data, <8 x i1> %notzero, bfloat %passthru)
+  ret bfloat %res
+}
+
 define float @extract_last_float(<4 x float> %data, <4 x i32> %mask, float %passthru) {
 ; NEON-FIXED-LABEL: extract_last_float:
 ; NEON-FIXED:       // %bb.0:
 ; NEON-FIXED-NEXT:    sub sp, sp, #16
 ; NEON-FIXED-NEXT:    .cfi_def_cfa_offset 16
 ; NEON-FIXED-NEXT:    cmtst v1.4s, v1.4s, v1.4s
-; NEON-FIXED-NEXT:    adrp x8, .LCPI4_0
+; NEON-FIXED-NEXT:    adrp x8, .LCPI6_0
 ; NEON-FIXED-NEXT:    mov x9, sp
-; NEON-FIXED-NEXT:    ldr d4, [x8, :lo12:.LCPI4_0]
+; NEON-FIXED-NEXT:    ldr d4, [x8, :lo12:.LCPI6_0]
 ; NEON-FIXED-NEXT:    str q0, [sp]
 ; NEON-FIXED-NEXT:    xtn v3.4h, v1.4s
 ; NEON-FIXED-NEXT:    umaxv s1, v1.4s
@@ -248,9 +348,9 @@ define double @extract_last_double(<2 x double> %data, <2 x i64> %mask, double %
 ; NEON-FIXED-NEXT:    sub sp, sp, #16
 ; NEON-FIXED-NEXT:    .cfi_def_cfa_offset 16
 ; NEON-FIXED-NEXT:    cmtst v1.2d, v1.2d, v1.2d
-; NEON-FIXED-NEXT:    adrp x8, .LCPI5_0
+; NEON-FIXED-NEXT:    adrp x8, .LCPI7_0
 ; NEON-FIXED-NEXT:    mov x9, sp
-; NEON-FIXED-NEXT:    ldr d4, [x8, :lo12:.LCPI5_0]
+; NEON-FIXED-NEXT:    ldr d4, [x8, :lo12:.LCPI7_0]
 ; NEON-FIXED-NEXT:    str q0, [sp]
 ; NEON-FIXED-NEXT:    xtn v3.2s, v1.2d
 ; NEON-FIXED-NEXT:    umaxv s1, v1.4s
@@ -293,17 +393,7 @@ define double @extract_last_double(<2 x double> %data, <2 x i64> %mask, double %
 define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i8_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z1.b, #0, #1
-; CHECK-NEXT:    mov z2.b, #0 // =0x0
-; CHECK-NEXT:    ptrue p1.b
-; CHECK-NEXT:    sel z1.b, p0, z1.b, z2.b
-; CHECK-NEXT:    umaxv b1, p1, z1.b
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    and x8, x8, #0xff
-; CHECK-NEXT:    whilels p1.b, xzr, x8
-; CHECK-NEXT:    ptest p0, p0.b
-; CHECK-NEXT:    lastb w8, p1, z0.b
-; CHECK-NEXT:    csel w0, w8, w0, ne
+; CHECK-NEXT:    clastb w0, p0, w0, z0.b
 ; CHECK-NEXT:    ret
   %res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 %passthru)
   ret i8 %res
@@ -312,17 +402,7 @@ define i8 @extract_last_i8_scalable(<vscale x 16 x i8> %data,
 define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i16_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z1.h, #0, #1
-; CHECK-NEXT:    mov z2.h, #0 // =0x0
-; CHECK-NEXT:    ptrue p1.h
-; CHECK-NEXT:    sel z1.h, p0, z1.h, z2.h
-; CHECK-NEXT:    umaxv h1, p1, z1.h
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    and x8, x8, #0xffff
-; CHECK-NEXT:    whilels p2.h, xzr, x8
-; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    lastb w8, p2, z0.h
-; CHECK-NEXT:    csel w0, w8, w0, ne
+; CHECK-NEXT:    clastb w0, p0, w0, z0.h
 ; CHECK-NEXT:    ret
   %res = call i16 @llvm.experimental.vector.extract.last.active.nxv8i16(<vscale x 8 x i16> %data, <vscale x 8 x i1> %mask, i16 %passthru)
   ret i16 %res
@@ -331,17 +411,7 @@ define i16 @extract_last_i16_scalable(<vscale x 8 x i16> %data,
 define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i32_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z1.s, #0, #1
-; CHECK-NEXT:    mov z2.s, #0 // =0x0
-; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    sel z1.s, p0, z1.s, z2.s
-; CHECK-NEXT:    umaxv s1, p1, z1.s
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    mov w8, w8
-; CHECK-NEXT:    whilels p2.s, xzr, x8
-; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    lastb w8, p2, z0.s
-; CHECK-NEXT:    csel w0, w8, w0, ne
+; CHECK-NEXT:    clastb w0, p0, w0, z0.s
 ; CHECK-NEXT:    ret
   %res = call i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32> %data, <vscale x 4 x i1> %mask, i32 %passthru)
   ret i32 %res
@@ -350,35 +420,37 @@ define i32 @extract_last_i32_scalable(<vscale x 4 x i32> %data,
 define i64 @extract_last_i64_scalable(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru) #0 {
 ; CHECK-LABEL: extract_last_i64_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z1.d, #0, #1
-; CHECK-NEXT:    mov z2.d, #0 // =0x0
-; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    sel z1.d, p0, z1.d, z2.d
-; CHECK-NEXT:    umaxv d1, p1, z1.d
-; CHECK-NEXT:    fmov x8, d1
-; CHECK-NEXT:    whilels p2.d, xzr, x8
-; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    lastb x8, p2, z0.d
-; CHECK-NEXT:    csel x0, x8, x0, ne
+; CHECK-NEXT:    clastb x0, p0, x0, z0.d
 ; CHECK-NEXT:    ret
   %res = call i64 @llvm.experimental.vector.extract.last.active.nxv2i64(<vscale x 2 x i64> %data, <vscale x 2 x i1> %mask, i64 %passthru)
   ret i64 %res
 }
 
+define half @extract_last_half_scalable(<vscale x 8 x half> %data, <vscale x 8 x i1> %mask, half %passthru) #0 {
+; CHECK-LABEL: extract_last_half_scalable:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clastb h1, p0, h1, z0.h
+; CHECK-NEXT:    fmov s0, s1
+; CHECK-NEXT:    ret
+  %res = call half @llvm.experimental.vector.extract.last.active.nxv8f16(<vscale x 8 x half> %data, <vscale x 8 x i1> %mask, half %passthru)
+  ret half %res
+}
+
+define bfloat @extract_last_bfloat_scalable(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %mask, bfloat %passthru) #0 {
+; CHECK-LABEL: extract_last_bfloat_scalable:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    clastb h1, p0, h1, z0.h
+; CHECK-NEXT:    fmov s0, s1
+; CHECK-NEXT:    ret
+  %res = call bfloat @llvm.experimental.vector.extract.last.active.nxv8bf16(<vscale x 8 x bfloat> %data, <vscale x 8 x i1> %mask, bfloat %passthru)
+  ret bfloat %res
+}
+
 define float @extract_last_float_scalable(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru) #0 {
 ; CHECK-LABEL: extract_last_float_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z2.s, #0, #1
-; CHECK-NEXT:    mov z3.s, #0 // =0x0
-; CHECK-NEXT:    ptrue p1.s
-; CHECK-NEXT:    sel z2.s, p0, z2.s, z3.s
-; CHECK-NEXT:    umaxv s2, p1, z2.s
-; CHECK-NEXT:    fmov w8, s2
-; CHECK-NEXT:    mov w8, w8
-; CHECK-NEXT:    whilels p2.s, xzr, x8
-; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    lastb s0, p2, z0.s
-; CHECK-NEXT:    fcsel s0, s0, s1, ne
+; CHECK-NEXT:    clastb s1, p0, s1, z0.s
+; CHECK-NEXT:    fmov s0, s1
 ; CHECK-NEXT:    ret
   %res = call float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float> %data, <vscale x 4 x i1> %mask, float %passthru)
   ret float %res
@@ -387,16 +459,8 @@ define float @extract_last_float_scalable(<vscale x 4 x float> %data,
 define double @extract_last_double_scalable(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru) #0 {
 ; CHECK-LABEL: extract_last_double_scalable:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z2.d, #0, #1
-; CHECK-NEXT:    mov z3.d, #0 // =0x0
-; CHECK-NEXT:    ptrue p1.d
-; CHECK-NEXT:    sel z2.d, p0, z2.d, z3.d
-; CHECK-NEXT:    umaxv d2, p1, z2.d
-; CHECK-NEXT:    fmov x8, d2
-; CHECK-NEXT:    whilels p2.d, xzr, x8
-; CHECK-NEXT:    ptest p1, p0.b
-; CHECK-NEXT:    lastb d0, p2, z0.d
-; CHECK-NEXT:    fcsel d0, d0, d1, ne
+; CHECK-NEXT:    clastb d1, p0, d1, z0.d
+; CHECK-NEXT:    fmov d0, d1
 ; CHECK-NEXT:    ret
   %res = call double @llvm.experimental.vector.extract.last.active.nxv2f64(<vscale x 2 x double> %data, <vscale x 2 x i1> %mask, double %passthru)
   ret double %res
@@ -406,31 +470,42 @@ define double @extract_last_double_scalable(<vscale x 2 x double> %data,
 define i8 @extract_last_i8_scalable_poison_passthru(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask) #0 {
 ; CHECK-LABEL: extract_last_i8_scalable_poison_passthru:
 ; CHECK:       // %bb.0:
-; CHECK-NEXT:    index z1.b, #0, #1
-; CHECK-NEXT:    mov z2.b, #0 // =0x0
-; CHECK-NEXT:    sel z1.b, p0, z1.b, z2.b
-; CHECK-NEXT:    ptrue p0.b
-; CHECK-NEXT:    umaxv b1, p0, z1.b
-; CHECK-NEXT:    fmov w8, s1
-; CHECK-NEXT:    and x8, x8, #0xff
-; CHECK-NEXT:    whilels p0.b, xzr, x8
 ; CHECK-NEXT:    lastb w0, p0, z0.b
 ; CHECK-NEXT:    ret
   %res = call i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8> %data, <vscale x 16 x i1> %mask, i8 poison)
   ret i8 %res
 }
 
+;; (c)lastb doesn't exist for predicate types; check we get functional codegen
+define i1 @extract_last_i1_scalable(<vscale x 16 x i1> %data, <vscale x 16 x i1> %mask) #0 {
+; CHECK-LABEL: extract_last_i1_scalable:
+; CHECK:       // %bb.0:
+; CHECK-NEXT:    mov z0.b, p0/z, #1 // =0x1
+; CHECK-NEXT:    ptest p1, p1.b
+; CHECK-NEXT:    cset w9, ne
+; CHECK-NEXT:    lastb w8, p1, z0.b
+; CHECK-NEXT:    and w0, w9, w8
+; CHECK-NEXT:    ret
+  %res = call i1 @llvm.experimental.vector.extract.last.active.nxv16i1(<vscale x 16 x i1> %data, <vscale x 16 x i1> %mask, i1 false)
+  ret i1 %res
+}
+
 declare i8 @llvm.experimental.vector.extract.last.active.v16i8(<16 x i8>, <16 x i1>, i8)
 declare i16 @llvm.experimental.vector.extract.last.active.v8i16(<8 x i16>, <8 x i1>, i16)
 declare i32 @llvm.experimental.vector.extract.last.active.v4i32(<4 x i32>, <4 x i1>, i32)
 declare i64 @llvm.experimental.vector.extract.last.active.v2i64(<2 x i64>, <2 x i1>, i64)
+declare half @llvm.experimental.vector.extract.last.active.v8f16(<8 x half>, <8 x i1>, half)
+declare bfloat @llvm.experimental.vector.extract.last.active.v8bf16(<8 x bfloat>, <8 x i1>, bfloat)
 declare float @llvm.experimental.vector.extract.last.active.v4f32(<4 x float>, <4 x i1>, float)
 declare double @llvm.experimental.vector.extract.last.active.v2f64(<2 x double>, <2 x i1>, double)
 declare i8 @llvm.experimental.vector.extract.last.active.nxv16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i8)
 declare i16 @llvm.experimental.vector.extract.last.active.nxv8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i16)
 declare i32 @llvm.experimental.vector.extract.last.active.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
 declare i64 @llvm.experimental.vector.extract.last.active.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i64)
+declare half @llvm.experimental.vector.extract.last.active.nxv8f16(<vscale x 8 x half>, <vscale x 8 x i1>, half)
+declare bfloat @llvm.experimental.vector.extract.last.active.nxv8bf16(<vscale x 8 x bfloat>, <vscale x 8 x i1>, bfloat)
 declare float @llvm.experimental.vector.extract.last.active.nxv4f32(<vscale x 4 x float>, <vscale x 4 x i1>, float)
 declare double @llvm.experimental.vector.extract.last.active.nxv2f64(<vscale x 2 x double>, <vscale x 2 x i1>, double)
+declare i1 @llvm.experimental.vector.extract.last.active.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, i1)
attributes #0 = { "target-features"="+sve" vscale_range(1, 16) }