diff --git a/llvm/include/llvm/TargetParser/RISCVTargetParser.h b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
index c237e1ddd6b38..b13a94cd56f2e 100644
--- a/llvm/include/llvm/TargetParser/RISCVTargetParser.h
+++ b/llvm/include/llvm/TargetParser/RISCVTargetParser.h
@@ -65,7 +65,7 @@ CPUModel getCPUModel(StringRef CPU);
 
 } // namespace RISCV
 
-namespace RISCVII {
+namespace RISCVVType {
 enum VLMUL : uint8_t {
   LMUL_1 = 0,
   LMUL_2,
@@ -82,9 +82,7 @@ enum {
   TAIL_AGNOSTIC = 1,
   MASK_AGNOSTIC = 2,
 };
-} // namespace RISCVII
 
-namespace RISCVVType {
 // Is this a SEW value that can be encoded into the VTYPE format.
 inline static bool isValidSEW(unsigned SEW) {
   return isPowerOf2_32(SEW) && SEW >= 8 && SEW <= 64;
@@ -95,21 +93,21 @@ inline static bool isValidLMUL(unsigned LMUL, bool Fractional) {
   return isPowerOf2_32(LMUL) && LMUL <= 8 && (!Fractional || LMUL != 1);
 }
 
-unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
+unsigned encodeVTYPE(VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
                      bool MaskAgnostic);
 
-inline static RISCVII::VLMUL getVLMUL(unsigned VType) {
-  unsigned VLMUL = VType & 0x7;
-  return static_cast<RISCVII::VLMUL>(VLMUL);
+inline static VLMUL getVLMUL(unsigned VType) {
+  unsigned VLMul = VType & 0x7;
+  return static_cast<VLMUL>(VLMul);
 }
 
 // Decode VLMUL into 1,2,4,8 and fractional indicator.
-std::pair<unsigned, bool> decodeVLMUL(RISCVII::VLMUL VLMUL);
+std::pair<unsigned, bool> decodeVLMUL(VLMUL VLMul);
 
-inline static RISCVII::VLMUL encodeLMUL(unsigned LMUL, bool Fractional) {
+inline static VLMUL encodeLMUL(unsigned LMUL, bool Fractional) {
   assert(isValidLMUL(LMUL, Fractional) && "Unsupported LMUL");
   unsigned LmulLog2 = Log2_32(LMUL);
-  return static_cast<RISCVII::VLMUL>(Fractional ? 8 - LmulLog2 : LmulLog2);
+  return static_cast<VLMUL>(Fractional ? 8 - LmulLog2 : LmulLog2);
 }
 
 inline static unsigned decodeVSEW(unsigned VSEW) {
@@ -133,10 +131,9 @@ inline static bool isMaskAgnostic(unsigned VType) { return VType & 0x80; }
 
 void printVType(unsigned VType, raw_ostream &OS);
 
-unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul);
+unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul);
 
-std::optional<RISCVII::VLMUL>
-getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW);
+std::optional<VLMUL> getSameRatioLMUL(unsigned SEW, VLMUL VLMUL, unsigned EEW);
 } // namespace RISCVVType
 
 } // namespace llvm
diff --git a/llvm/lib/Analysis/ValueTracking.cpp b/llvm/lib/Analysis/ValueTracking.cpp
index 91a5f194db9dc..e3e026f7979da 100644
--- a/llvm/lib/Analysis/ValueTracking.cpp
+++ b/llvm/lib/Analysis/ValueTracking.cpp
@@ -1984,7 +1984,7 @@ static void computeKnownBitsFromOperator(const Operator *I,
       const ConstantRange Range = getVScaleRange(II->getFunction(), BitWidth);
       uint64_t SEW = RISCVVType::decodeVSEW(
           cast<ConstantInt>(II->getArgOperand(HasAVL))->getZExtValue());
-      RISCVII::VLMUL VLMUL = static_cast<RISCVII::VLMUL>(
+      RISCVVType::VLMUL VLMUL = static_cast<RISCVVType::VLMUL>(
          cast<ConstantInt>(II->getArgOperand(1 + HasAVL))->getZExtValue());
       uint64_t MaxVLEN =
           Range.getUnsignedMax().getZExtValue() * RISCV::RVVBitsPerBlock;
diff --git a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
index ac87d72b7595c..6d4466b7abf53 100644
--- a/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
+++ b/llvm/lib/Target/RISCV/AsmParser/RISCVAsmParser.cpp
@@ -2311,7 +2311,7 @@ ParseStatus RISCVAsmParser::parseVTypeI(OperandVector &Operands) {
   }
 
   if (getLexer().is(AsmToken::EndOfStatement) && State == VTypeState_Done) {
-    RISCVII::VLMUL VLMUL = RISCVVType::encodeLMUL(Lmul, Fractional);
+    RISCVVType::VLMUL VLMUL = RISCVVType::encodeLMUL(Lmul, Fractional);
     if (Fractional) {
       unsigned ELEN = STI->hasFeature(RISCV::FeatureStdExtZve64x) ? 64 : 32;
       unsigned MaxSEW = ELEN / Lmul;
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 6f0645965d737..56b1639143d8b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -1120,7 +1120,7 @@ bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,
   // divide exactly.
   assert(
       RISCVVType::decodeVLMUL(RISCVTargetLowering::getLMUL(LitTyMVT)).second ||
-      RISCVTargetLowering::getLMUL(LitTyMVT) == RISCVII::VLMUL::LMUL_1);
+      RISCVTargetLowering::getLMUL(LitTyMVT) == RISCVVType::LMUL_1);
 
   // If the vector type is an LMUL-group type, extract a subvector equal to the
   // nearest full vector register type.
@@ -1143,7 +1143,7 @@ bool RISCVLegalizerInfo::legalizeExtractSubvector(MachineInstr &MI,
   const LLT XLenTy(STI.getXLenVT());
   auto SlidedownAmt = MIB.buildVScale(XLenTy, RemIdx);
   auto [Mask, VL] = buildDefaultVLOps(LitTy, MIB, MRI);
-  uint64_t Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+  uint64_t Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
   auto Slidedown = MIB.buildInstr(
       RISCV::G_VSLIDEDOWN_VL, {InterLitTy},
       {MIB.buildUndef(InterLitTy), Vec, SlidedownAmt, Mask, VL, Policy});
@@ -1265,10 +1265,10 @@ bool RISCVLegalizerInfo::legalizeInsertSubvector(MachineInstr &MI,
   // Use tail agnostic policy if we're inserting over InterLitTy's tail.
   ElementCount EndIndex =
       ElementCount::getScalable(RemIdx) + LitTy.getElementCount();
-  uint64_t Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+  uint64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
   if (STI.expandVScale(EndIndex) ==
       STI.expandVScale(InterLitTy.getElementCount()))
-    Policy = RISCVII::TAIL_AGNOSTIC;
+    Policy = RISCVVType::TAIL_AGNOSTIC;
 
   Inserted =
       MIB.buildInstr(RISCV::G_VSLIDEUP_VL, {InsertedDst},
diff --git a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
index fb0dc482e6081..0881de90700ab 100644
--- a/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
+++ b/llvm/lib/Target/RISCV/MCA/RISCVCustomBehaviour.cpp
@@ -107,32 +107,32 @@ RISCVInstrumentManager::createInstruments(const MCInst &Inst) {
     LLVM_DEBUG(dbgs() << "RVCB: Found VSETVLI and creating instrument for it: "
                       << Inst << "\n");
     unsigned VTypeI = Inst.getOperand(2).getImm();
-    RISCVII::VLMUL VLMUL = RISCVVType::getVLMUL(VTypeI);
+    RISCVVType::VLMUL VLMUL = RISCVVType::getVLMUL(VTypeI);
 
     StringRef LMUL;
     switch (VLMUL) {
-    case RISCVII::LMUL_1:
+    case RISCVVType::LMUL_1:
       LMUL = "M1";
       break;
-    case RISCVII::LMUL_2:
+    case RISCVVType::LMUL_2:
       LMUL = "M2";
       break;
-    case RISCVII::LMUL_4:
+    case RISCVVType::LMUL_4:
       LMUL = "M4";
       break;
-    case RISCVII::LMUL_8:
+    case RISCVVType::LMUL_8:
       LMUL = "M8";
       break;
-    case RISCVII::LMUL_F2:
+    case RISCVVType::LMUL_F2:
       LMUL = "MF2";
       break;
-    case RISCVII::LMUL_F4:
+    case RISCVVType::LMUL_F4:
       LMUL = "MF4";
       break;
-    case RISCVII::LMUL_F8:
+    case RISCVVType::LMUL_F8:
       LMUL = "MF8";
       break;
-    case RISCVII::LMUL_RESERVED:
+    case RISCVVType::LMUL_RESERVED:
       llvm_unreachable("Cannot create instrument for LMUL_RESERVED");
     }
     SmallVector<UniqueInstrument> Instruments;
@@ -166,7 +166,7 @@ RISCVInstrumentManager::createInstruments(const MCInst &Inst) {
 }
 
 static std::pair<uint8_t, uint8_t>
-getEEWAndEMUL(unsigned Opcode, RISCVII::VLMUL LMUL, uint8_t SEW) {
+getEEWAndEMUL(unsigned Opcode, RISCVVType::VLMUL LMUL, uint8_t SEW) {
   uint8_t EEW;
   switch (Opcode) {
   case RISCV::VLM_V:
@@ -249,7 +249,7 @@ unsigned RISCVInstrumentManager::getSchedClassID(
   const RISCVVInversePseudosTable::PseudoInfo *RVV = nullptr;
   if (opcodeHasEEWAndEMULInfo(Opcode)) {
-    RISCVII::VLMUL VLMUL = static_cast<RISCVII::VLMUL>(LMUL);
+    RISCVVType::VLMUL VLMUL = static_cast<RISCVVType::VLMUL>(LMUL);
     auto [EEW, EMUL] = getEEWAndEMUL(Opcode, VLMUL, SEW);
     RVV = RISCVVInversePseudosTable::getBaseInfo(Opcode, EMUL, EEW);
   } else {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
index e1e50ac2078a7..58eb48ed613df 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVBaseInfo.h
@@ -141,8 +141,8 @@ static inline unsigned getFormat(uint64_t TSFlags) {
   return (TSFlags & InstFormatMask) >> InstFormatShift;
 }
 /// \returns the LMUL for the instruction.
-static inline VLMUL getLMul(uint64_t TSFlags) {
-  return static_cast<VLMUL>((TSFlags & VLMulMask) >> VLMulShift);
+static inline RISCVVType::VLMUL getLMul(uint64_t TSFlags) {
+  return static_cast<RISCVVType::VLMUL>((TSFlags & VLMulMask) >> VLMulShift);
 }
 /// \returns true if this a _TIED pseudo.
 static inline bool isTiedPseudo(uint64_t TSFlags) {
diff --git a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
index d5254719b3839..a4a40862a67c6 100644
--- a/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
+++ b/llvm/lib/Target/RISCV/MCTargetDesc/RISCVInstPrinter.cpp
@@ -210,7 +210,7 @@ void RISCVInstPrinter::printVTypeI(const MCInst *MI, unsigned OpNo,
   unsigned Imm = MI->getOperand(OpNo).getImm();
   // Print the raw immediate for reserved values: vlmul[2:0]=4, vsew[2:0]=0b1xx,
   // or non-zero in bits 8 and above.
-  if (RISCVVType::getVLMUL(Imm) == RISCVII::VLMUL::LMUL_RESERVED ||
+  if (RISCVVType::getVLMUL(Imm) == RISCVVType::VLMUL::LMUL_RESERVED ||
       RISCVVType::getSEW(Imm) > 64 || (Imm >> 8) != 0) {
     O << formatImm(Imm);
     return;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
index 72b60439ca840..7ea4bd94c0065 100644
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -279,7 +279,7 @@ void RISCVDAGToDAGISel::addVectorLoadStoreOperands(
   // none of the others do. All have passthru operands. For our pseudos,
   // all loads have policy operands.
   if (IsLoad) {
-    uint64_t Policy = RISCVII::MASK_AGNOSTIC;
+    uint64_t Policy = RISCVVType::MASK_AGNOSTIC;
     if (IsMasked)
       Policy = Node->getConstantOperandVal(CurOp++);
     SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
@@ -294,7 +294,7 @@ void RISCVDAGToDAGISel::selectVLSEG(SDNode *Node, unsigned NF, bool IsMasked,
   SDLoc DL(Node);
   MVT VT = Node->getSimpleValueType(0);
   unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
-  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
   SmallVector<SDValue, 8> Operands;
@@ -324,7 +324,7 @@ void RISCVDAGToDAGISel::selectVLSEGFF(SDNode *Node, unsigned NF,
   MVT VT = Node->getSimpleValueType(0);
   MVT XLenVT = Subtarget->getXLenVT();
   unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
-  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
   SmallVector<SDValue, 8> Operands;
@@ -355,7 +355,7 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked,
   SDLoc DL(Node);
   MVT VT = Node->getSimpleValueType(0);
   unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
-  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
   SmallVector<SDValue, 8> Operands;
@@ -379,7 +379,7 @@ void RISCVDAGToDAGISel::selectVLXSEG(SDNode *Node, unsigned NF, bool IsMasked,
          "Element count mismatch");
 #endif
 
-  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+  RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
     report_fatal_error("The V extension does not support EEW=64 for index "
@@ -404,7 +404,7 @@ void RISCVDAGToDAGISel::selectVSSEG(SDNode *Node, unsigned NF, bool IsMasked,
   SDLoc DL(Node);
   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
   unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
-  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
   SmallVector<SDValue, 8> Operands;
@@ -430,7 +430,7 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked,
   SDLoc DL(Node);
   MVT VT = Node->getOperand(2)->getSimpleValueType(0);
   unsigned Log2SEW = Node->getConstantOperandVal(Node->getNumOperands() - 1);
-  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
 
   unsigned CurOp = 2;
   SmallVector<SDValue, 8> Operands;
@@ -454,7 +454,7 @@ void RISCVDAGToDAGISel::selectVSXSEG(SDNode *Node, unsigned NF, bool IsMasked,
          "Element count mismatch");
 #endif
 
-  RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+  RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
   if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
     report_fatal_error("The V extension does not support EEW=64 for index "
@@ -495,7 +495,7 @@ void RISCVDAGToDAGISel::selectVSETVLI(SDNode *Node) {
 
   unsigned SEW =
       RISCVVType::decodeVSEW(Node->getConstantOperandVal(Offset) & 0x7);
-  RISCVII::VLMUL VLMul = static_cast<RISCVII::VLMUL>(
+  RISCVVType::VLMUL VLMul = static_cast<RISCVVType::VLMUL>(
       Node->getConstantOperandVal(Offset + 1) & 0x7);
 
   unsigned VTypeI = RISCVVType::encodeVTYPE(VLMul, SEW, /*TailAgnostic*/ true,
@@ -1672,7 +1672,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       default:
         llvm_unreachable("Unexpected LMUL!");
 #define CASE_VMSLT_OPCODES(lmulenum, suffix)                                   \
-  case RISCVII::VLMUL::lmulenum:                                               \
+  case RISCVVType::lmulenum:                                                   \
     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
     VMSGTOpcode = IsUnsigned ? RISCV::PseudoVMSGTU_VX_##suffix                 \
@@ -1692,7 +1692,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       default:
         llvm_unreachable("Unexpected LMUL!");
 #define CASE_VMNAND_VMSET_OPCODES(lmulenum, suffix)                            \
-  case RISCVII::VLMUL::lmulenum:                                               \
+  case RISCVVType::lmulenum:                                                   \
     VMNANDOpcode = RISCV::PseudoVMNAND_MM_##suffix;                            \
     VMSetOpcode = RISCV::PseudoVMSET_M_##suffix;                               \
     break;
@@ -1768,7 +1768,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       default:
         llvm_unreachable("Unexpected LMUL!");
 #define CASE_VMSLT_OPCODES(lmulenum, suffix)                                   \
-  case RISCVII::VLMUL::lmulenum:                                               \
+  case RISCVVType::lmulenum:                                                   \
     VMSLTOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix                 \
                              : RISCV::PseudoVMSLT_VX_##suffix;                 \
     VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_##suffix##_MASK      \
@@ -1790,7 +1790,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       default:
         llvm_unreachable("Unexpected LMUL!");
 #define CASE_VMXOR_VMANDN_VMOR_OPCODES(lmulenum, suffix)                       \
-  case RISCVII::VLMUL::lmulenum:                                               \
+  case RISCVVType::lmulenum:                                                   \
     VMXOROpcode = RISCV::PseudoVMXOR_MM_##suffix;                              \
     VMANDNOpcode = RISCV::PseudoVMANDN_MM_##suffix;                            \
     VMOROpcode = RISCV::PseudoVMOR_MM_##suffix;                                \
@@ -1839,7 +1839,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       }
 
       SDValue PolicyOp =
-          CurDAG->getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+          CurDAG->getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT);
 
       if (IsCmpConstant) {
         SDValue Imm =
@@ -2005,8 +2005,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
              "Element count mismatch");
 
-      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+      RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+      RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
         report_fatal_error("The V extension does not support EEW=64 for index "
@@ -2058,7 +2058,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                  Operands, /*IsLoad=*/true);
 
-      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+      RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
       const RISCV::VLEPseudo *P =
           RISCV::getVLEPseudo(IsMasked, IsStrided, /*FF*/ false, Log2SEW,
                               static_cast<unsigned>(LMUL));
@@ -2085,7 +2085,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
                                  /*IsStridedOrIndexed*/ false, Operands,
                                  /*IsLoad=*/true);
 
-      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+      RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
       const RISCV::VLEPseudo *P =
          RISCV::getVLEPseudo(IsMasked, /*Strided*/ false, /*FF*/ true,
                              Log2SEW, static_cast<unsigned>(LMUL));
@@ -2211,8 +2211,8 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
             "Element count mismatch");
 
-      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-      RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
+      RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+      RISCVVType::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
       if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
         report_fatal_error("The V extension does not support EEW=64 for index "
@@ -2250,7 +2250,7 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
       addVectorLoadStoreOperands(Node, Log2SEW, DL, CurOp, IsMasked, IsStrided,
                                  Operands);
 
-      RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+      RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
       const RISCV::VSEPseudo *P = RISCV::getVSEPseudo(
           IsMasked, IsStrided, Log2SEW, static_cast<unsigned>(LMUL));
       MachineSDNode *Store =
@@ -2317,11 +2317,12 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
     if (Idx != 0)
       break;
 
-    RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecContainerVT);
+    RISCVVType::VLMUL SubVecLMUL =
+        RISCVTargetLowering::getLMUL(SubVecContainerVT);
     [[maybe_unused]] bool IsSubVecPartReg =
-        SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
-        SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
-        SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
+        SubVecLMUL == RISCVVType::VLMUL::LMUL_F2 ||
+        SubVecLMUL == RISCVVType::VLMUL::LMUL_F4 ||
+        SubVecLMUL == RISCVVType::VLMUL::LMUL_F8;
     assert((V.getValueType().isRISCVVectorTuple() || !IsSubVecPartReg ||
             V.isUndef()) &&
            "Expecting lowering to have created legal INSERT_SUBVECTORs when "
@@ -2442,11 +2443,11 @@ void RISCVDAGToDAGISel::Select(SDNode *Node) {
                                  Ld->getBasePtr()};
     if (IsStrided)
       Operands.push_back(CurDAG->getRegister(RISCV::X0, XLenVT));
-    uint64_t Policy = RISCVII::MASK_AGNOSTIC | RISCVII::TAIL_AGNOSTIC;
+    uint64_t Policy = RISCVVType::MASK_AGNOSTIC | RISCVVType::TAIL_AGNOSTIC;
     SDValue PolicyOp = CurDAG->getTargetConstant(Policy, DL, XLenVT);
     Operands.append({VL, SEW, PolicyOp, Ld->getChain()});
 
-    RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+    RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
     const RISCV::VLEPseudo *P = RISCV::getVLEPseudo(
         /*IsMasked*/ false, IsStrided, /*FF*/ false, Log2SEW,
         static_cast<unsigned>(LMUL));
@@ -3985,7 +3986,7 @@ bool RISCVDAGToDAGISel::performCombineVMergeAndVOps(SDNode *N) {
   // preserve them.
   bool MergeVLShrunk = VL != OrigVL;
   uint64_t Policy = (isImplicitDef(Passthru) && !MergeVLShrunk)
-                        ? RISCVII::TAIL_AGNOSTIC
+                        ? RISCVVType::TAIL_AGNOSTIC
                         : /*TUMU*/ 0;
   SDValue PolicyOp =
       CurDAG->getTargetConstant(Policy, DL, Subtarget->getXLenVT());
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index c40ab0d09bdf6..98c25bc93a8a2 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1110,7 +1110,7 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
         setOperationAction(FloatingPointLibCallOps, VT, Expand);
 
         // Custom split nxv32[b]f16 since nxv32[b]f32 is not legal.
-        if (getLMUL(VT) == RISCVII::VLMUL::LMUL_8) {
+        if (getLMUL(VT) == RISCVVType::LMUL_8) {
           setOperationAction(ZvfhminZvfbfminPromoteOps, VT, Custom);
           setOperationAction(ZvfhminZvfbfminPromoteVPOps, VT, Custom);
         } else {
@@ -2361,25 +2361,25 @@ static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
   }
 }
 
-RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
+RISCVVType::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
   if (VT.isRISCVVectorTuple()) {
     if (VT.SimpleTy >= MVT::riscv_nxv1i8x2 &&
         VT.SimpleTy <= MVT::riscv_nxv1i8x8)
-      return RISCVII::LMUL_F8;
+      return RISCVVType::LMUL_F8;
     if (VT.SimpleTy >= MVT::riscv_nxv2i8x2 &&
         VT.SimpleTy <= MVT::riscv_nxv2i8x8)
-      return RISCVII::LMUL_F4;
+      return RISCVVType::LMUL_F4;
     if (VT.SimpleTy >= MVT::riscv_nxv4i8x2 &&
         VT.SimpleTy <= MVT::riscv_nxv4i8x8)
-      return RISCVII::LMUL_F2;
+      return RISCVVType::LMUL_F2;
     if (VT.SimpleTy >= MVT::riscv_nxv8i8x2 &&
         VT.SimpleTy <= MVT::riscv_nxv8i8x8)
-      return RISCVII::LMUL_1;
+      return RISCVVType::LMUL_1;
     if (VT.SimpleTy >= MVT::riscv_nxv16i8x2 &&
         VT.SimpleTy <= MVT::riscv_nxv16i8x4)
-      return RISCVII::LMUL_2;
+      return RISCVVType::LMUL_2;
     if (VT.SimpleTy == MVT::riscv_nxv32i8x2)
-      return RISCVII::LMUL_4;
+      return RISCVVType::LMUL_4;
     llvm_unreachable("Invalid vector tuple type LMUL.");
   }
 
@@ -2392,56 +2392,54 @@ RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
   default:
     llvm_unreachable("Invalid LMUL.");
   case 8:
-    return RISCVII::VLMUL::LMUL_F8;
+    return RISCVVType::LMUL_F8;
   case 16:
-    return RISCVII::VLMUL::LMUL_F4;
+    return RISCVVType::LMUL_F4;
   case 32:
-    return RISCVII::VLMUL::LMUL_F2;
+    return RISCVVType::LMUL_F2;
   case 64:
-    return RISCVII::VLMUL::LMUL_1;
+    return RISCVVType::LMUL_1;
   case 128:
-    return RISCVII::VLMUL::LMUL_2;
+    return RISCVVType::LMUL_2;
   case 256:
-    return RISCVII::VLMUL::LMUL_4;
+    return RISCVVType::LMUL_4;
   case 512:
-    return RISCVII::VLMUL::LMUL_8;
+    return RISCVVType::LMUL_8;
   }
 }
 
-unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
+unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVVType::VLMUL LMul) {
   switch (LMul) {
   default:
     llvm_unreachable("Invalid LMUL.");
-  case RISCVII::VLMUL::LMUL_F8:
-  case RISCVII::VLMUL::LMUL_F4:
-  case RISCVII::VLMUL::LMUL_F2:
-  case RISCVII::VLMUL::LMUL_1:
+  case RISCVVType::LMUL_F8:
+  case RISCVVType::LMUL_F4:
+  case RISCVVType::LMUL_F2:
+  case RISCVVType::LMUL_1:
     return RISCV::VRRegClassID;
-  case RISCVII::VLMUL::LMUL_2:
+  case RISCVVType::LMUL_2:
     return RISCV::VRM2RegClassID;
-  case RISCVII::VLMUL::LMUL_4:
+  case RISCVVType::LMUL_4:
     return RISCV::VRM4RegClassID;
-  case RISCVII::VLMUL::LMUL_8:
+  case RISCVVType::LMUL_8:
     return RISCV::VRM8RegClassID;
   }
 }
 
 unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
-  RISCVII::VLMUL LMUL = getLMUL(VT);
-  if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
-      LMUL == RISCVII::VLMUL::LMUL_F4 ||
-      LMUL == RISCVII::VLMUL::LMUL_F2 ||
-      LMUL == RISCVII::VLMUL::LMUL_1) {
+  RISCVVType::VLMUL LMUL = getLMUL(VT);
+  if (LMUL == RISCVVType::LMUL_F8 || LMUL == RISCVVType::LMUL_F4 ||
+      LMUL == RISCVVType::LMUL_F2 || LMUL == RISCVVType::LMUL_1) {
     static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                   "Unexpected subreg numbering");
     return RISCV::sub_vrm1_0 + Index;
   }
-  if (LMUL == RISCVII::VLMUL::LMUL_2) {
+  if (LMUL == RISCVVType::LMUL_2) {
     static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                   "Unexpected subreg numbering");
     return RISCV::sub_vrm2_0 + Index;
   }
-  if (LMUL == RISCVII::VLMUL::LMUL_4) {
+  if (LMUL == RISCVVType::LMUL_4) {
     static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                   "Unexpected subreg numbering");
     return RISCV::sub_vrm4_0 + Index;
@@ -3347,9 +3345,9 @@ static SDValue getVSlidedown(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
                              const SDLoc &DL, EVT VT, SDValue Passthru,
                              SDValue Op, SDValue Offset, SDValue Mask,
                              SDValue VL,
-                             unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
+                             unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
   if (Passthru.isUndef())
-    Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+    Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
   SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
   SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
   return DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, VT, Ops);
@@ -3359,9 +3357,9 @@ static SDValue getVSlideup(SelectionDAG &DAG, const RISCVSubtarget &Subtarget,
                            const SDLoc &DL, EVT VT, SDValue Passthru,
                            SDValue Op, SDValue Offset, SDValue Mask,
                            SDValue VL,
-                           unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
+                           unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED) {
   if (Passthru.isUndef())
-    Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+    Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
   SDValue PolicyOp = DAG.getTargetConstant(Policy, DL, Subtarget.getXLenVT());
   SDValue Ops[] = {Passthru, Op, Offset, Mask, VL, PolicyOp};
   return DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, VT, Ops);
@@ -4245,13 +4243,13 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
     InstructionCost PerSlideCost = 1;
     switch (RISCVTargetLowering::getLMUL(ContainerVT)) {
     default: break;
-    case RISCVII::VLMUL::LMUL_2:
+    case RISCVVType::LMUL_2:
       PerSlideCost = 2;
       break;
-    case RISCVII::VLMUL::LMUL_4:
+    case RISCVVType::LMUL_4:
       PerSlideCost = 4;
       break;
-    case RISCVII::VLMUL::LMUL_8:
+    case RISCVVType::LMUL_8:
       PerSlideCost = 8;
       break;
     }
@@ -4281,7 +4279,7 @@ static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
           VT.getVectorElementType().getSizeInBits() <= Subtarget.getFLen()) &&
          "Illegal type which will result in reserved encoding");
 
-  const unsigned Policy = RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC;
+  const unsigned Policy = RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC;
 
   SDValue Vec;
   UndefCount = 0;
@@ -4773,11 +4771,12 @@ static SDValue lowerVECTOR_SHUFFLEAsVSlideup(const SDLoc &DL, MVT VT,
   auto TrueMask = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).first;
   // We slide up by the index that the subvector is being inserted at, and set
   // VL to the index + the number of elements being inserted.
-  unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED | RISCVII::MASK_AGNOSTIC;
+  unsigned Policy =
+      RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED | RISCVVType::MASK_AGNOSTIC;
   // If the we're adding a suffix to the in place vector, i.e. inserting right
   // up to the very end of it, then we don't actually care about the tail.
   if (NumSubElts + Index >= (int)NumElts)
-    Policy |= RISCVII::TAIL_AGNOSTIC;
+    Policy |= RISCVVType::TAIL_AGNOSTIC;
 
   InPlace = convertToScalableVector(ContainerVT, InPlace, DAG, Subtarget);
   ToInsert = convertToScalableVector(ContainerVT, ToInsert, DAG, Subtarget);
@@ -5570,7 +5569,7 @@ static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
     if (LoV)
       Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV,
                         DAG.getConstant(InvRotate, DL, XLenVT), TrueMask, VL,
-                        RISCVII::TAIL_AGNOSTIC);
+                        RISCVVType::TAIL_AGNOSTIC);
 
     return convertFromScalableVector(VT, Res, DAG, Subtarget);
   }
@@ -9457,10 +9456,10 @@ SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
       DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
 
   // Use tail agnostic policy if Idx is the last index of Vec.
-  unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+  unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
   if (VecVT.isFixedLengthVector() && isa<ConstantSDNode>(Idx) &&
       Idx->getAsZExtVal() + 1 == VecVT.getVectorNumElements())
-    Policy = RISCVII::TAIL_AGNOSTIC;
+    Policy = RISCVVType::TAIL_AGNOSTIC;
   SDValue Slideup = getVSlideup(DAG, Subtarget, DL, ContainerVT, Vec, ValInVec,
                                 Idx, Mask, InsertVL, Policy);
 
@@ -9740,7 +9739,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
       }
     }
     if (!I32VL) {
-      RISCVII::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
+      RISCVVType::VLMUL Lmul = RISCVTargetLowering::getLMUL(VT);
       SDValue LMUL = DAG.getConstant(Lmul, DL, XLenVT);
       unsigned Sew = RISCVVType::encodeSEW(VT.getScalarSizeInBits());
       SDValue SEW = DAG.getConstant(Sew, DL, XLenVT);
@@ -9791,7 +9790,7 @@ static SDValue lowerVectorIntrinsicScalars(SDValue Op, SelectionDAG &DAG,
   if (MaskedOff.isUndef())
     return Vec;
   // TAMU
-  if (Policy == RISCVII::TAIL_AGNOSTIC)
+  if (Policy == RISCVVType::TAIL_AGNOSTIC)
     return DAG.getNode(RISCVISD::VMERGE_VL, DL, VT, Mask, Vec, MaskedOff,
                        DAG.getUNDEF(VT), AVL);
   // TUMA or TUMU: Currently we always emit tumu policy regardless of tuma.
@@ -10547,7 +10546,7 @@ static SDValue lowerReductionSeq(unsigned RVVOpcode, MVT ResVT,
       DAG.getNode(ISD::INSERT_SUBVECTOR, DL, M1VT, DAG.getUNDEF(M1VT),
                   InitialValue, DAG.getVectorIdxConstant(0, DL));
   SDValue PassThru = NonZeroAVL ? DAG.getUNDEF(M1VT) : InitialValue;
-  SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+  SDValue Policy = DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT);
   SDValue Ops[] = {PassThru, Vec, InitialValue, Mask, VL, Policy};
   SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, Ops);
   return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
@@ -10807,9 +10806,9 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
       SDValue VL = DAG.getConstant(EndIndex, DL, XLenVT);
 
       // Use tail agnostic policy if we're inserting over Vec's tail.
-      unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+      unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
       if (VecVT.isFixedLengthVector() && EndIndex == VecVT.getVectorNumElements())
-        Policy = RISCVII::TAIL_AGNOSTIC;
+        Policy = RISCVVType::TAIL_AGNOSTIC;
 
       // If we're inserting into the lowest elements, use a tail undisturbed
       // vmv.v.v.
@@ -10933,10 +10932,10 @@ SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
     VL = DAG.getElementCount(DL, XLenVT, SubVecVT.getVectorElementCount());
 
     // Use tail agnostic policy if we're inserting over InterSubVT's tail.
-    unsigned Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+    unsigned Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
     if (Subtarget.expandVScale(EndIndex) ==
         Subtarget.expandVScale(InterSubVT.getVectorElementCount()))
-      Policy = RISCVII::TAIL_AGNOSTIC;
+      Policy = RISCVVType::TAIL_AGNOSTIC;
 
     // If we're inserting into the lowest elements, use a tail undisturbed
     // vmv.v.v.
@@ -11108,7 +11107,7 @@ SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
   // was > M1 then the index would need to be a multiple of VLMAX, and so would
   // divide exactly.
   assert(RISCVVType::decodeVLMUL(getLMUL(ContainerSubVecVT)).second ||
-         getLMUL(ContainerSubVecVT) == RISCVII::VLMUL::LMUL_1);
+         getLMUL(ContainerSubVecVT) == RISCVVType::LMUL_1);
 
   // If the vector type is an LMUL-group type, extract a subvector equal to the
   // nearest full vector register type.
@@ -11719,7 +11718,7 @@ SDValue RISCVTargetLowering::lowerVECTOR_SPLICE(SDValue Op,
                                   DownOffset, TrueMask, UpOffset);
   return getVSlideup(DAG, Subtarget, DL, VecVT, SlideDown, V2, UpOffset,
                      TrueMask, DAG.getRegister(RISCV::X0, XLenVT),
-                     RISCVII::TAIL_AGNOSTIC);
+                     RISCVVType::TAIL_AGNOSTIC);
 }
 
 SDValue
@@ -11883,7 +11882,7 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
   Ops.push_back(Mask);
   Ops.push_back(VL);
   if (IntID == Intrinsic::riscv_vle_mask)
-    Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
+    Ops.push_back(DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT));
 
   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
 
@@ -11902,7 +11901,7 @@ SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
   // overflow.
   if (IndexEltVT == MVT::i8 && VT.getVectorNumElements() > 256) {
     // FIXME: We need to do vector splitting manually for LMUL=8 cases.
-    assert(getLMUL(IndexVT) != RISCVII::LMUL_8);
+    assert(getLMUL(IndexVT) != RISCVVType::LMUL_8);
     IndexVT = IndexVT.changeVectorElementType(MVT::i16);
     UseVRGATHEREI16 = true;
   }
@@ -12698,7 +12697,7 @@ RISCVTargetLowering::lowerVPSpliceExperimental(SDValue Op,
       getVSlidedown(DAG, Subtarget, DL, ContainerVT, DAG.getUNDEF(ContainerVT),
                     Op1, DownOffset, Mask, UpOffset);
   SDValue Result = getVSlideup(DAG, Subtarget, DL, ContainerVT, SlideDown, Op2,
-                               UpOffset, Mask, EVL2, RISCVII::TAIL_AGNOSTIC);
+                               UpOffset, Mask, EVL2, RISCVVType::TAIL_AGNOSTIC);
 
   if (IsMaskVector) {
     // Truncate Result back to a mask vector (Result has same EVL as Op2)
@@ -12915,7 +12914,8 @@ SDValue RISCVTargetLowering::lowerVPStridedLoad(SDValue Op,
   }
   Ops.push_back(VPNode->getVectorLength());
   if (!IsUnmasked) {
-    SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
+    SDValue Policy =
+        DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT);
     Ops.push_back(Policy);
   }
 
@@ -13053,7 +13053,7 @@ SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
   Ops.push_back(Mask);
   Ops.push_back(VL);
   if (!IsUnmasked)
-    Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
+    Ops.push_back(DAG.getTargetConstant(RISCVVType::TAIL_AGNOSTIC, DL, XLenVT));
 
   SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
   SDValue Result =
@@ -19553,8 +19553,8 @@ void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
   case Intrinsic::riscv_vsetvlimax: {
     bool HasAVL = IntNo == Intrinsic::riscv_vsetvli;
     unsigned VSEW = Op.getConstantOperandVal(HasAVL + 1);
-    RISCVII::VLMUL VLMUL =
-        static_cast<RISCVII::VLMUL>(Op.getConstantOperandVal(HasAVL + 2));
+    RISCVVType::VLMUL VLMUL =
+        static_cast<RISCVVType::VLMUL>(Op.getConstantOperandVal(HasAVL + 2));
     unsigned SEW = RISCVVType::decodeVSEW(VSEW);
     auto [LMul, Fractional] = RISCVVType::decodeVLMUL(VLMUL);
     uint64_t MaxVL = Subtarget.getRealMaxVLen() / SEW;
@@ -20168,7 +20168,7 @@ static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
 
 // Helper to find Masked Pseudo instruction from MC instruction, LMUL and SEW.
 static const RISCV::RISCVMaskedPseudoInfo *
-lookupMaskedIntrinsic(uint16_t MCOpcode, RISCVII::VLMUL LMul, unsigned SEW) {
+lookupMaskedIntrinsic(uint16_t MCOpcode, RISCVVType::VLMUL LMul, unsigned SEW) {
   const RISCVVInversePseudosTable::PseudoInfo *Inverse =
       RISCVVInversePseudosTable::getBaseInfo(MCOpcode, LMul, SEW);
   assert(Inverse && "Unexpected LMUL and SEW pair for instruction");
@@ -20211,7 +20211,7 @@ static MachineBasicBlock *emitVFROUND_NOEXCEPT_MASK(MachineInstr &MI,
                                  /*IsImp*/ true));
 
   // Emit a VFCVT_F_X
-  RISCVII::VLMUL LMul = RISCVII::getLMul(MI.getDesc().TSFlags);
+  RISCVVType::VLMUL LMul = RISCVII::getLMul(MI.getDesc().TSFlags);
   unsigned Log2SEW = MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm();
   // There is no E8 variant for VFCVT_F_X.
   assert(Log2SEW >= 4);
@@ -23262,13 +23262,13 @@ bool RISCVTargetLowering::lowerDeinterleavedIntrinsicToVPLoad(
       Load->getModule(), IntrMaskIds[Factor - 2],
       {VecTupTy, Mask->getType(), EVL->getType()});
 
-  Value *Operands[] = {
-      PoisonVal,
-      Load->getArgOperand(0),
-      Mask,
-      EVL,
-      ConstantInt::get(XLenTy, RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC),
-      ConstantInt::get(XLenTy, Log2_64(SEW))};
+  Value *Operands[] = {PoisonVal,
+                       Load->getArgOperand(0),
+                       Mask,
+                       EVL,
+                       ConstantInt::get(XLenTy, RISCVVType::TAIL_AGNOSTIC |
+                                                    RISCVVType::MASK_AGNOSTIC),
+                       ConstantInt::get(XLenTy, Log2_64(SEW))};
 
   CallInst *VlsegN = Builder.CreateCall(VlsegNFunc, Operands);
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
index e9dd8ff96fa37..26b888653c81d 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -823,7 +823,7 @@ class RISCVTargetLowering : public TargetLowering {
   // Return the value of VLMax for the given vector type (i.e. SEW and LMUL)
   SDValue computeVLMax(MVT VecVT, const SDLoc &DL, SelectionDAG &DAG) const;
 
-  static RISCVII::VLMUL getLMUL(MVT VT);
+  static RISCVVType::VLMUL getLMUL(MVT VT);
   inline static unsigned computeVLMAX(unsigned VectorBits, unsigned EltSize,
                                       unsigned MinSize) {
     // Original equation:
@@ -839,7 +839,7 @@ class RISCVTargetLowering : public TargetLowering {
   static std::pair<unsigned, unsigned>
   computeVLMAXBounds(MVT ContainerVT, const RISCVSubtarget &Subtarget);
 
-  static unsigned getRegClassIDForLMUL(RISCVII::VLMUL LMul);
+  static unsigned getRegClassIDForLMUL(RISCVVType::VLMUL LMul);
   static unsigned getSubregIndexByMVT(MVT VT, unsigned Index);
   static unsigned getRegClassIDForVecVT(MVT VT);
   static std::pair<unsigned, unsigned>
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index ffc7e09368824..7433603daff85 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -342,7 +342,7 @@ inline raw_ostream &operator<<(raw_ostream &OS, const DemandedFields &DF) {
 }
 #endif
 
-static bool isLMUL1OrSmaller(RISCVII::VLMUL LMUL) {
+static bool isLMUL1OrSmaller(RISCVVType::VLMUL LMUL) {
   auto [LMul, Fractional] = RISCVVType::decodeVLMUL(LMUL);
   return Fractional || LMul == 1;
 }
@@ -564,7 +564,7 @@ class VSETVLIInfo {
   } State = Uninitialized;
 
   // Fields from VTYPE.
-  RISCVII::VLMUL VLMul = RISCVII::LMUL_1;
+  RISCVVType::VLMUL VLMul = RISCVVType::LMUL_1;
   uint8_t SEW = 0;
   uint8_t TailAgnostic : 1;
   uint8_t MaskAgnostic : 1;
@@ -642,7 +642,7 @@ class VSETVLIInfo {
   }
 
   unsigned getSEW() const { return SEW; }
-  RISCVII::VLMUL getVLMUL() const { return VLMul; }
+  RISCVVType::VLMUL getVLMUL() const { return VLMul; }
   bool getTailAgnostic() const { return TailAgnostic; }
   bool getMaskAgnostic() const { return MaskAgnostic; }
 
@@ -707,7 +707,7 @@ class VSETVLIInfo {
     TailAgnostic = RISCVVType::isTailAgnostic(VType);
     MaskAgnostic = RISCVVType::isMaskAgnostic(VType);
   }
-  void setVTYPE(RISCVII::VLMUL L, unsigned S, bool TA, bool MA) {
+  void setVTYPE(RISCVVType::VLMUL L, unsigned S, bool TA, bool MA) {
     assert(isValid() && !isUnknown() &&
            "Can't set VTYPE for uninitialized or unknown");
     VLMul = L;
@@ -716,7 +716,7 @@ class VSETVLIInfo {
     MaskAgnostic = MA;
   }
 
-  void setVLMul(RISCVII::VLMUL VLMul) { this->VLMul = VLMul; }
+  void setVLMul(RISCVVType::VLMUL VLMul) { this->VLMul = VLMul; }
 
   unsigned encodeVTYPE() const {
     assert(isValid() && !isUnknown() && !SEWLMULRatioOnly &&
@@ -1018,7 +1018,7 @@ RISCVInsertVSETVLI::getInfoForVSETVLI(const MachineInstr &MI) const {
 }
 
 static unsigned computeVLMAX(unsigned VLEN, unsigned SEW,
-                             RISCVII::VLMUL VLMul) {
+                             RISCVVType::VLMUL VLMul) {
   auto [LMul, Fractional] = RISCVVType::decodeVLMUL(VLMul);
   if (Fractional)
     VLEN = VLEN / LMul;
@@ -1043,17 +1043,18 @@ RISCVInsertVSETVLI::computeInfoForInstr(const MachineInstr &MI) const {
     if (RISCVII::hasVecPolicyOp(TSFlags)) {
       const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1);
       uint64_t Policy = Op.getImm();
-      assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
+      assert(Policy <=
+                 (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) &&
              "Invalid Policy Value");
-      TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
-      MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
+      TailAgnostic = Policy & RISCVVType::TAIL_AGNOSTIC;
+      MaskAgnostic = Policy & RISCVVType::MASK_AGNOSTIC;
    }
 
    if (!RISCVII::usesMaskPolicy(TSFlags))
      MaskAgnostic = true;
  }
 
-  RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
+  RISCVVType::VLMUL VLMul = RISCVII::getLMul(TSFlags);
 
   unsigned Log2SEW = MI.getOperand(getSEWOpNum(MI)).getImm();
   // A Log2SEW of 0 is an operation on mask registers only.
@@ -1245,8 +1246,7 @@ void RISCVInsertVSETVLI::transferBefore(VSETVLIInfo &Info,
     // be coalesced into another vsetvli since we won't demand any fields.
     VSETVLIInfo NewInfo; // Need a new VSETVLIInfo to clear SEWLMULRatioOnly
     NewInfo.setAVLImm(1);
-    NewInfo.setVTYPE(RISCVII::VLMUL::LMUL_1, /*sew*/ 8, /*ta*/ true,
-                     /*ma*/ true);
+    NewInfo.setVTYPE(RISCVVType::LMUL_1, /*sew*/ 8, /*ta*/ true, /*ma*/ true);
     Info = NewInfo;
     return;
   }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index 456fb66917216..8f7db34561749 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -193,7 +193,7 @@ static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
                                    const MachineBasicBlock &MBB,
                                    MachineBasicBlock::const_iterator MBBI,
                                    MachineBasicBlock::const_iterator &DefMBBI,
-                                   RISCVII::VLMUL LMul) {
+                                   RISCVVType::VLMUL LMul) {
   if (PreferWholeRegisterMove)
     return false;
 
@@ -223,7 +223,7 @@ static bool isConvertibleToVMV_V_V(const RISCVSubtarget &STI,
       if (!FirstVSetVLI) {
         FirstVSetVLI = true;
         unsigned FirstVType = MBBI->getOperand(2).getImm();
-        RISCVII::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
+        RISCVVType::VLMUL FirstLMul = RISCVVType::getVLMUL(FirstVType);
         FirstSEW = RISCVVType::getSEW(FirstVType);
         // The first encountered vsetvli must have the same lmul as the
         // register class of COPY.
@@ -326,7 +326,7 @@ void RISCVInstrInfo::copyPhysRegVector(
     const DebugLoc &DL, MCRegister DstReg, MCRegister SrcReg, bool KillSrc,
     const TargetRegisterClass *RegClass) const {
   const TargetRegisterInfo *TRI = STI.getRegisterInfo();
-  RISCVII::VLMUL LMul = RISCVRI::getLMul(RegClass->TSFlags);
+  RISCVVType::VLMUL LMul = RISCVRI::getLMul(RegClass->TSFlags);
   unsigned NF = RISCVRI::getNF(RegClass->TSFlags);
 
   uint16_t SrcEncoding = TRI->getEncodingValue(SrcReg);
@@ -345,7 +345,7 @@ void RISCVInstrInfo::copyPhysRegVector(
   unsigned I = 0;
   auto GetCopyInfo = [&](uint16_t SrcEncoding, uint16_t DstEncoding)
-      -> std::tuple<RISCVII::VLMUL, const TargetRegisterClass &, unsigned,
+      -> std::tuple<RISCVVType::VLMUL, const TargetRegisterClass &, unsigned,
                     unsigned, unsigned> {
     if (ReversedCopy) {
       // For reversed copying, if there are enough aligned registers(8/4/2), we
@@ -357,40 +357,40 @@ void RISCVInstrInfo::copyPhysRegVector(
       uint16_t Diff = DstEncoding - SrcEncoding;
       if (I + 8 <= NumRegs && Diff >= 8 && SrcEncoding % 8 == 7 &&
           DstEncoding % 8 == 7)
-        return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
+        return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
                 RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
       if (I + 4 <= NumRegs && Diff >= 4 && SrcEncoding % 4 == 3 &&
          DstEncoding % 4 == 3)
-        return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
+        return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
                 RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
      if (I + 2 <= NumRegs && Diff >= 2 && SrcEncoding % 2 == 1 &&
         DstEncoding % 2 == 1)
-        return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
+        return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
                 RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
       // Or we should do LMUL1 copying.
-      return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
+      return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
              RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
     }
 
     // For forward copying, if source register encoding and destination register
    // encoding are aligned to 8/4/2, we can do a LMUL8/4/2 copying.
     if (I + 8 <= NumRegs && SrcEncoding % 8 == 0 && DstEncoding % 8 == 0)
-      return {RISCVII::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
+      return {RISCVVType::LMUL_8, RISCV::VRM8RegClass, RISCV::VMV8R_V,
              RISCV::PseudoVMV_V_V_M8, RISCV::PseudoVMV_V_I_M8};
     if (I + 4 <= NumRegs && SrcEncoding % 4 == 0 && DstEncoding % 4 == 0)
-      return {RISCVII::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
+      return {RISCVVType::LMUL_4, RISCV::VRM4RegClass, RISCV::VMV4R_V,
              RISCV::PseudoVMV_V_V_M4, RISCV::PseudoVMV_V_I_M4};
     if (I + 2 <= NumRegs && SrcEncoding % 2 == 0 && DstEncoding % 2 == 0)
-      return {RISCVII::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
+      return {RISCVVType::LMUL_2, RISCV::VRM2RegClass, RISCV::VMV2R_V,
             RISCV::PseudoVMV_V_V_M2, RISCV::PseudoVMV_V_I_M2};
     // Or we should do LMUL1 copying.
-    return {RISCVII::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
+    return {RISCVVType::LMUL_1, RISCV::VRRegClass, RISCV::VMV1R_V,
            RISCV::PseudoVMV_V_V_M1, RISCV::PseudoVMV_V_I_M1};
   };
 
   auto FindRegWithEncoding = [TRI](const TargetRegisterClass &RegClass,
                                    uint16_t Encoding) {
     MCRegister Reg = RISCV::V0 + Encoding;
-    if (RISCVRI::getLMul(RegClass.TSFlags) == RISCVII::LMUL_1)
+    if (RISCVRI::getLMul(RegClass.TSFlags) == RISCVVType::LMUL_1)
       return Reg;
     return TRI->getMatchingSuperReg(Reg, RISCV::sub_vrm1_0, &RegClass);
   };
@@ -2580,7 +2580,8 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
       Ok = Imm >= 0 && Imm < RISCVCC::COND_INVALID;
       break;
     case RISCVOp::OPERAND_VEC_POLICY:
-      Ok = (Imm & (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) == Imm;
+      Ok = (Imm &
+            (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC)) == Imm;
       break;
     case RISCVOp::OPERAND_SEW:
       Ok = (isUInt<5>(Imm) && RISCVVType::isValidSEW(1 << Imm));
@@ -2648,7 +2649,7 @@ bool RISCVInstrInfo::verifyInstruction(const MachineInstr &MI,
         return false;
       }
       uint64_t Policy = MI.getOperand(OpIdx).getImm();
-      if (Policy > (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC)) {
+      if (Policy > (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC)) {
         ErrInfo = "Invalid Policy Value";
         return false;
       }
@@ -3234,10 +3235,10 @@ std::string RISCVInstrInfo::createMIROperandComment(
   }
   case RISCVOp::OPERAND_VEC_POLICY:
     unsigned Policy = Op.getImm();
-    assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
+    assert(Policy <= (RISCVVType::TAIL_AGNOSTIC | RISCVVType::MASK_AGNOSTIC) &&
            "Invalid Policy Value");
-    OS << (Policy & RISCVII::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
-       << (Policy & RISCVII::MASK_AGNOSTIC ? "ma" : "mu");
+    OS << (Policy & RISCVVType::TAIL_AGNOSTIC ? "ta" : "tu") << ", "
+       << (Policy & RISCVVType::MASK_AGNOSTIC ? "ma" : "mu");
     break;
   }
 
diff --git a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
index 6c4e9c7b1bdc7..0830191dde3f4 100644
--- a/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
+++ b/llvm/lib/Target/RISCV/RISCVRegisterInfo.h
@@ -43,8 +43,9 @@ static inline bool isVRegClass(uint64_t TSFlags) {
 }
 
 /// \returns the LMUL for the register class.
-static inline RISCVII::VLMUL getLMul(uint64_t TSFlags) {
-  return static_cast<RISCVII::VLMUL>((TSFlags & VLMulShiftMask) >> VLMulShift);
+static inline RISCVVType::VLMUL getLMul(uint64_t TSFlags) {
+  return static_cast<RISCVVType::VLMUL>((TSFlags & VLMulShiftMask) >>
+                                        VLMulShift);
 }
 
 /// \returns the NF for the register class.
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
index da77bae18962c..79e3b9ee09744 100644
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -765,9 +765,11 @@ InstructionCost RISCVTTIImpl::getShuffleCost(TTI::ShuffleKind Kind,
 }
 
 static unsigned isM1OrSmaller(MVT VT) {
-  RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
-  return (LMUL == RISCVII::VLMUL::LMUL_F8 || LMUL == RISCVII::VLMUL::LMUL_F4 ||
-          LMUL == RISCVII::VLMUL::LMUL_F2 || LMUL == RISCVII::VLMUL::LMUL_1);
+  RISCVVType::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
+  return (LMUL == RISCVVType::VLMUL::LMUL_F8 ||
+          LMUL == RISCVVType::VLMUL::LMUL_F4 ||
+          LMUL == RISCVVType::VLMUL::LMUL_F2 ||
+          LMUL == RISCVVType::VLMUL::LMUL_1);
 }
 
 InstructionCost RISCVTTIImpl::getScalarizationOverhead(
diff --git a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
index 1ba7f0b522a2b..e5a98598370ec 100644
--- a/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVLOptimizer.cpp
@@ -65,13 +65,13 @@ class RISCVVLOptimizer : public MachineFunctionPass {
 /// Represents the EMUL and EEW of a MachineOperand.
 struct OperandInfo {
   // Represent as 1,2,4,8, ... and fractional indicator. This is because
-  // EMUL can take on values that don't map to RISCVII::VLMUL values exactly.
+  // EMUL can take on values that don't map to RISCVVType::VLMUL values exactly.
   // For example, a mask operand can have an EMUL less than MF8.
   std::optional<std::pair<unsigned, bool>> EMUL;
 
   unsigned Log2EEW;
 
-  OperandInfo(RISCVII::VLMUL EMUL, unsigned Log2EEW)
+  OperandInfo(RISCVVType::VLMUL EMUL, unsigned Log2EEW)
       : EMUL(RISCVVType::decodeVLMUL(EMUL)), Log2EEW(Log2EEW) {}
 
   OperandInfo(std::pair<unsigned, bool> EMUL, unsigned Log2EEW)
@@ -141,7 +141,7 @@ static raw_ostream &operator<<(raw_ostream &OS,
 /// SEW are from the TSFlags of MI.
 static std::pair<unsigned, bool>
 getEMULEqualsEEWDivSEWTimesLMUL(unsigned Log2EEW, const MachineInstr &MI) {
-  RISCVII::VLMUL MIVLMUL = RISCVII::getLMul(MI.getDesc().TSFlags);
+  RISCVVType::VLMUL MIVLMUL = RISCVII::getLMul(MI.getDesc().TSFlags);
   auto [MILMUL, MILMULIsFractional] = RISCVVType::decodeVLMUL(MIVLMUL);
   unsigned MILog2SEW =
       MI.getOperand(RISCVII::getSEWOpNum(MI.getDesc())).getImm();
diff --git a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
index 0bddbacc89e3e..ee90868d252e4 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorMaskDAGMutation.cpp
@@ -123,7 +123,7 @@ class RISCVVectorMaskDAGMutation : public ScheduleDAGMutation {
           // For LMUL=8 cases, there will be more possibilities to spill.
           // FIXME: We should use RegPressureTracker to do fine-grained
          // controls.
-          RISCVII::getLMul(MI->getDesc().TSFlags) != RISCVII::LMUL_8)
+          RISCVII::getLMul(MI->getDesc().TSFlags) != RISCVVType::LMUL_8)
         DAG->addEdge(&SU, SDep(NearestUseV0SU, SDep::Artificial));
     }
   }
diff --git a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
index 5ef1c9444f59a..7c05ff1f1a70e 100644
--- a/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
+++ b/llvm/lib/Target/RISCV/RISCVVectorPeephole.cpp
@@ -371,7 +371,7 @@ bool RISCVVectorPeephole::convertAllOnesVMergeToVMv(MachineInstr &MI) const {
   MI.removeOperand(2); // False operand
   MI.removeOperand(3); // Mask operand
   MI.addOperand(
-      MachineOperand::CreateImm(RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED));
+      MachineOperand::CreateImm(RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED));
 
   // vmv.v.v doesn't have a mask operand, so we may be able to inflate the
   // register class for the destination and passthru operands e.g. VRNoV0 -> VR
@@ -438,7 +438,7 @@ bool RISCVVectorPeephole::convertSameMaskVMergeToVMv(MachineInstr &MI) {
   MI.removeOperand(2); // False operand
   MI.removeOperand(3); // Mask operand
   MI.addOperand(
-      MachineOperand::CreateImm(RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED));
+      MachineOperand::CreateImm(RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED));
 
   // vmv.v.v doesn't have a mask operand, so we may be able to inflate the
   // register class for the destination and passthru operands e.g. VRNoV0 -> VR
@@ -580,7 +580,7 @@ bool RISCVVectorPeephole::foldUndefPassthruVMV_V_V(MachineInstr &MI) {
         Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc()));
 
     if (RISCV::isVLKnownLE(MIVL, SrcVL))
-      SrcPolicy.setImm(SrcPolicy.getImm() | RISCVII::TAIL_AGNOSTIC);
+      SrcPolicy.setImm(SrcPolicy.getImm() | RISCVVType::TAIL_AGNOSTIC);
   }
 
   MRI->replaceRegWith(MI.getOperand(0).getReg(), MI.getOperand(2).getReg());
@@ -646,10 +646,10 @@ bool RISCVVectorPeephole::foldVMV_V_V(MachineInstr &MI) {
   }
 
   // If MI was tail agnostic and the VL didn't increase, preserve it.
-  int64_t Policy = RISCVII::TAIL_UNDISTURBED_MASK_UNDISTURBED;
-  if ((MI.getOperand(5).getImm() & RISCVII::TAIL_AGNOSTIC) &&
+  int64_t Policy = RISCVVType::TAIL_UNDISTURBED_MASK_UNDISTURBED;
+  if ((MI.getOperand(5).getImm() & RISCVVType::TAIL_AGNOSTIC) &&
       RISCV::isVLKnownLE(MI.getOperand(3), SrcVL))
-    Policy |= RISCVII::TAIL_AGNOSTIC;
+    Policy |= RISCVVType::TAIL_AGNOSTIC;
   Src->getOperand(RISCVII::getVecPolicyOpNum(Src->getDesc())).setImm(Policy);
 
   MRI->replaceRegWith(MI.getOperand(0).getReg(), Src->getOperand(0).getReg());
diff --git a/llvm/lib/TargetParser/RISCVTargetParser.cpp b/llvm/lib/TargetParser/RISCVTargetParser.cpp
index 625645a99e12f..4111f8bfd2662 100644
--- a/llvm/lib/TargetParser/RISCVTargetParser.cpp
+++ b/llvm/lib/TargetParser/RISCVTargetParser.cpp
@@ -165,12 +165,12 @@ namespace RISCVVType {
 //     6 | vta       | Vector tail agnostic
 //   5:3 | vsew[2:0] | Standard element width (SEW) setting
 //   2:0 | vlmul[2:0]| Vector register group multiplier (LMUL) setting
-unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
+unsigned encodeVTYPE(VLMUL VLMul, unsigned SEW, bool TailAgnostic,
                      bool MaskAgnostic) {
   assert(isValidSEW(SEW) && "Invalid SEW");
-  unsigned VLMULBits = static_cast<unsigned>(VLMUL);
+  unsigned VLMulBits = static_cast<unsigned>(VLMul);
   unsigned VSEWBits = encodeSEW(SEW);
-  unsigned VTypeI = (VSEWBits << 3) | (VLMULBits & 0x7);
+  unsigned VTypeI = (VSEWBits << 3) | (VLMulBits & 0x7);
   if (TailAgnostic)
     VTypeI |= 0x40;
   if (MaskAgnostic)
@@ -179,19 +179,19 @@ unsigned encodeVTYPE(RISCVII::VLMUL VLMUL, unsigned SEW, bool TailAgnostic,
   return VTypeI;
 }
 
-std::pair<unsigned, bool> decodeVLMUL(RISCVII::VLMUL VLMUL) {
-  switch (VLMUL) {
+std::pair<unsigned, bool> decodeVLMUL(VLMUL VLMul) {
+  switch (VLMul) {
   default:
     llvm_unreachable("Unexpected LMUL value!");
-  case RISCVII::VLMUL::LMUL_1:
-  case RISCVII::VLMUL::LMUL_2:
-  case RISCVII::VLMUL::LMUL_4:
-  case RISCVII::VLMUL::LMUL_8:
-    return std::make_pair(1 << static_cast<unsigned>(VLMUL), false);
-  case RISCVII::VLMUL::LMUL_F2:
-  case RISCVII::VLMUL::LMUL_F4:
-  case RISCVII::VLMUL::LMUL_F8:
-    return std::make_pair(1 << (8 - static_cast<unsigned>(VLMUL)), true);
+  case LMUL_1:
+  case LMUL_2:
+  case LMUL_4:
+  case LMUL_8:
+    return std::make_pair(1 << static_cast<unsigned>(VLMul), false);
+  case LMUL_F2:
+  case LMUL_F4:
+  case LMUL_F8:
+    return std::make_pair(1 << (8 - static_cast<unsigned>(VLMul)), true);
   }
 }
 
@@ -220,7 +220,7 @@ void printVType(unsigned VType, raw_ostream &OS) {
     OS << ", mu";
 }
 
-unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul) {
+unsigned getSEWLMULRatio(unsigned SEW, VLMUL VLMul) {
   unsigned LMul;
   bool Fractional;
   std::tie(LMul, Fractional) = decodeVLMUL(VLMul);
@@ -232,9 +232,8 @@ unsigned getSEWLMULRatio(unsigned SEW, RISCVII::VLMUL VLMul) {
   return (SEW * 8) / LMul;
 }
 
-std::optional<RISCVII::VLMUL>
-getSameRatioLMUL(unsigned SEW, RISCVII::VLMUL VLMUL, unsigned EEW) {
-  unsigned Ratio = RISCVVType::getSEWLMULRatio(SEW, VLMUL);
+std::optional<VLMUL> getSameRatioLMUL(unsigned SEW, VLMUL VLMul, unsigned EEW) {
+  unsigned Ratio = RISCVVType::getSEWLMULRatio(SEW, VLMul);
   unsigned EMULFixedPoint = (EEW * 8) / Ratio;
   bool Fractional = EMULFixedPoint < 8;
   unsigned EMUL = Fractional ? 8 / EMULFixedPoint : EMULFixedPoint / 8;
diff --git a/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp b/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp
index 68338b569a208..63ac8f993ecdc 100644
--- a/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp
+++ b/llvm/unittests/TargetParser/RISCVTargetParserTest.cpp
@@ -14,20 +14,20 @@ using namespace llvm;
 namespace {
 TEST(RISCVVType, CheckSameRatioLMUL) {
   // Smaller LMUL.
-  EXPECT_EQ(RISCVII::LMUL_1,
-            RISCVVType::getSameRatioLMUL(16, RISCVII::LMUL_2, 8));
-  EXPECT_EQ(RISCVII::LMUL_F2,
-            RISCVVType::getSameRatioLMUL(16, RISCVII::LMUL_1, 8));
+  EXPECT_EQ(RISCVVType::LMUL_1,
+            RISCVVType::getSameRatioLMUL(16, RISCVVType::LMUL_2, 8));
+  EXPECT_EQ(RISCVVType::LMUL_F2,
+            RISCVVType::getSameRatioLMUL(16, RISCVVType::LMUL_1, 8));
   // Smaller fractional LMUL.
-  EXPECT_EQ(RISCVII::LMUL_F8,
-            RISCVVType::getSameRatioLMUL(16, RISCVII::LMUL_F4, 8));
+  EXPECT_EQ(RISCVVType::LMUL_F8,
+            RISCVVType::getSameRatioLMUL(16, RISCVVType::LMUL_F4, 8));
   // Bigger LMUL.
-  EXPECT_EQ(RISCVII::LMUL_2,
-            RISCVVType::getSameRatioLMUL(8, RISCVII::LMUL_1, 16));
-  EXPECT_EQ(RISCVII::LMUL_1,
-            RISCVVType::getSameRatioLMUL(8, RISCVII::LMUL_F2, 16));
+  EXPECT_EQ(RISCVVType::LMUL_2,
+            RISCVVType::getSameRatioLMUL(8, RISCVVType::LMUL_1, 16));
+  EXPECT_EQ(RISCVVType::LMUL_1,
+            RISCVVType::getSameRatioLMUL(8, RISCVVType::LMUL_F2, 16));
   // Bigger fractional LMUL.
-  EXPECT_EQ(RISCVII::LMUL_F2,
-            RISCVVType::getSameRatioLMUL(8, RISCVII::LMUL_F4, 16));
+  EXPECT_EQ(RISCVVType::LMUL_F2,
+            RISCVVType::getSameRatioLMUL(8, RISCVVType::LMUL_F4, 16));
 }
 } // namespace
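
Reviewer note (not part of the patch): the rename is mechanical, but a small standalone sketch may help sanity-check the relocated API. The snippet below is hypothetical and assumes it is compiled inside the LLVM tree so the header resolves; it only exercises functions visible in this diff (encodeVTYPE, getVLMUL, getSEW, isTailAgnostic, isMaskAgnostic, decodeVLMUL), all now reached through RISCVVType rather than RISCVII.

// Hypothetical round-trip check for the renamed RISCVVType API.
#include "llvm/TargetParser/RISCVTargetParser.h"
#include <cassert>

using namespace llvm;

int main() {
  // Pack a vtype immediate for LMUL=2, SEW=16, tail-agnostic, mask-agnostic.
  // Both the VLMUL enum and the policy bits now live in RISCVVType
  // (formerly RISCVII).
  unsigned VType = RISCVVType::encodeVTYPE(RISCVVType::LMUL_2, /*SEW=*/16,
                                           /*TailAgnostic=*/true,
                                           /*MaskAgnostic=*/true);

  // Unpack the fields again; each getter reads a slice of the vtype bit
  // layout documented in RISCVTargetParser.cpp above.
  assert(RISCVVType::getVLMUL(VType) == RISCVVType::LMUL_2);
  assert(RISCVVType::getSEW(VType) == 16);
  assert(RISCVVType::isTailAgnostic(VType));
  assert(RISCVVType::isMaskAgnostic(VType));

  // decodeVLMUL yields the group multiplier plus a fractional flag,
  // e.g. LMUL_F4 -> {4, true}, meaning LMUL = 1/4.
  auto [LMul, Fractional] = RISCVVType::decodeVLMUL(RISCVVType::LMUL_F4);
  assert(LMul == 4 && Fractional);
  return 0;
}

Since VLMUL moved into the same namespace as its encode/decode helpers, callers inside RISCVVType (as in RISCVTargetParser.cpp above) can drop the qualifier entirely, which is why the header now reads encodeVTYPE(VLMUL ...) rather than encodeVTYPE(RISCVII::VLMUL ...).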