diff --git a/llvm/include/llvm/IR/InlineAsm.h b/llvm/include/llvm/IR/InlineAsm.h index b489b842d41ca..c1bf767f7c291 100644 --- a/llvm/include/llvm/IR/InlineAsm.h +++ b/llvm/include/llvm/IR/InlineAsm.h @@ -15,6 +15,7 @@ #ifndef LLVM_IR_INLINEASM_H #define LLVM_IR_INLINEASM_H +#include "llvm/ADT/Bitfields.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/StringRef.h" #include "llvm/IR/Value.h" @@ -196,23 +197,6 @@ class InlineAsm final : public Value { return V->getValueID() == Value::InlineAsmVal; } - // These are helper methods for dealing with flags in the INLINEASM SDNode - // in the backend. - // - // The encoding of the flag word is currently: - // Bits 2-0 - A Kind::* value indicating the kind of the operand. - // Bits 15-3 - The number of SDNode operands associated with this inline - // assembly operand. - // If bit 31 is set: - // Bit 30-16 - The operand number that this operand must match. - // When bits 2-0 are Kind::Mem, the Constraint_* value must be - // obtained from the flags for this operand number. - // Else if bits 2-0 are Kind::Mem: - // Bit 30-16 - A Constraint_* value indicating the original constraint - // code. - // Else: - // Bit 30-16 - The register class ID to use for the operand. - enum : uint32_t { // Fixed operands on an INLINEASM SDNode. Op_InputChain = 0, @@ -241,6 +225,7 @@ class InlineAsm final : public Value { // Addresses are included here as they need to be treated the same by the // backend, the only difference is that they are not used to actaully // access memory by the instruction. + // TODO: convert to enum? Constraint_Unknown = 0, Constraint_es, Constraint_i, @@ -274,15 +259,12 @@ class InlineAsm final : public Value { Constraint_ZT, Constraints_Max = Constraint_ZT, - Constraints_ShiftAmount = 16, - - Flag_MatchingOperand = 0x80000000 }; // Inline asm operands map to multiple SDNode / MachineInstr operands. // The first operand is an immediate describing the asm operand, the low // bits is the kind: - enum class Kind { + enum class Kind : uint8_t { RegUse = 1, // Input register, "r". RegDef = 2, // Output register, "=r". RegDefEarlyClobber = 3, // Early-clobber output register, "=&r". @@ -292,101 +274,149 @@ class InlineAsm final : public Value { Func = 7, // Address operand of function call }; - static unsigned getFlagWord(Kind Kind, unsigned NumOps) { - assert(((NumOps << 3) & ~0xffff) == 0 && "Too many inline asm operands!"); - return static_cast(Kind) | (NumOps << 3); - } - - static bool isRegDefKind(unsigned Flag) { - return getKind(Flag) == Kind::RegDef; - } - static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind::Imm; } - static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind::Mem; } - static bool isFuncKind(unsigned Flag) { return getKind(Flag) == Kind::Func; } - static bool isRegDefEarlyClobberKind(unsigned Flag) { - return getKind(Flag) == Kind::RegDefEarlyClobber; - } - static bool isClobberKind(unsigned Flag) { - return getKind(Flag) == Kind::Clobber; - } - - /// getFlagWordForMatchingOp - Augment an existing flag word returned by - /// getFlagWord with information indicating that this input operand is tied - /// to a previous output operand. 
- static unsigned getFlagWordForMatchingOp(unsigned InputFlag, - unsigned MatchedOperandNo) { - assert(MatchedOperandNo <= 0x7fff && "Too big matched operand"); - assert((InputFlag & ~0xffff) == 0 && "High bits already contain data"); - return InputFlag | Flag_MatchingOperand | (MatchedOperandNo << 16); - } - - /// getFlagWordForRegClass - Augment an existing flag word returned by - /// getFlagWord with the required register class for the following register - /// operands. - /// A tied use operand cannot have a register class, use the register class - /// from the def operand instead. - static unsigned getFlagWordForRegClass(unsigned InputFlag, unsigned RC) { - // Store RC + 1, reserve the value 0 to mean 'no register class'. - ++RC; - assert(!isImmKind(InputFlag) && "Immediates cannot have a register class"); - assert(!isMemKind(InputFlag) && "Memory operand cannot have a register class"); - assert(RC <= 0x7fff && "Too large register class ID"); - assert((InputFlag & ~0xffff) == 0 && "High bits already contain data"); - return InputFlag | (RC << 16); - } + // These are helper methods for dealing with flags in the INLINEASM SDNode + // in the backend. + // + // The encoding of Flag is currently: + // Bits 2-0 - A Kind::* value indicating the kind of the operand. + // Bits 15-3 - The number of SDNode operands associated with this inline + // assembly operand. + // If bit 31 is set: + // Bit 30-16 - The operand number that this operand must match. + // When bits 2-0 are Kind::Mem, the Constraint_* value must be + // obtained from the flags for this operand number. + // Else if bits 2-0 are Kind::Mem: + // Bit 30-16 - A Constraint_* value indicating the original constraint + // code. + // Else: + // Bit 30-16 - The register class ID to use for the operand. + // + // Bits 30-16 are called "Data" for lack of a better name. The getter is + // intentionally private; the public methods that rely on that private method + // should be used to check invariants first before accessing Data. 
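As an aside (not part of the patch): the bit layout documented above can be decoded by hand with plain shifts and masks. The standalone sketch below mirrors the raw-flag arithmetic that the Flag class introduced next replaces; the example value and constants are taken only from the comment above.

#include <cassert>
#include <cstdint>
#include <cstdio>

int main() {
  // Kind::RegUse (1) in bits 2-0, two operands in bits 15-3,
  // register class ID 5 stored as RC + 1 = 6 in bits 30-16, bit 31 clear.
  std::uint32_t Flag = 1u | (2u << 3) | (6u << 16);
  unsigned Kind = Flag & 7;                 // bits 2-0
  unsigned NumOps = (Flag >> 3) & 0x1fff;   // bits 15-3
  unsigned Data = (Flag >> 16) & 0x7fff;    // bits 30-16
  bool IsMatched = (Flag & 0x80000000u) != 0; // bit 31
  assert(Kind == 1 && NumOps == 2 && Data == 6 && !IsMatched);
  std::printf("kind=%u numops=%u rc=%u matched=%d\n", Kind, NumOps, Data - 1,
              (int)IsMatched);
  return 0;
}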
+ class Flag { + uint32_t Storage; + using KindField = Bitfield::Element<Kind, 0, 3, Kind::Func>; + using NumOperands = Bitfield::Element<unsigned, 3, 13>; + using Data = Bitfield::Element<unsigned, 16, 15>; + using IsMatched = Bitfield::Element<bool, 31, 1>; + + unsigned getData() const { return Bitfield::get<Data>(Storage); } + bool isMatched() const { return Bitfield::get<IsMatched>(Storage); } + void setKind(Kind K) { Bitfield::set<KindField>(Storage, K); } + void setNumOperands(unsigned N) { Bitfield::set<NumOperands>(Storage, N); } + void setData(unsigned D) { Bitfield::set<Data>(Storage, D); } + void setIsMatched(bool B) { Bitfield::set<IsMatched>(Storage, B); } + + public: + Flag() : Storage(0) {} + explicit Flag(uint32_t F) : Storage(F) {} + Flag(enum Kind K, unsigned NumOps) { + setKind(K); + setNumOperands(NumOps); + setData(0); + setIsMatched(false); + } + operator uint32_t() { return Storage; } + Kind getKind() const { return Bitfield::get<KindField>(Storage); } + bool isRegUseKind() const { return getKind() == Kind::RegUse; } + bool isRegDefKind() const { return getKind() == Kind::RegDef; } + bool isRegDefEarlyClobberKind() const { + return getKind() == Kind::RegDefEarlyClobber; + } + bool isClobberKind() const { return getKind() == Kind::Clobber; } + bool isImmKind() const { return getKind() == Kind::Imm; } + bool isMemKind() const { return getKind() == Kind::Mem; } + bool isFuncKind() const { return getKind() == Kind::Func; } + StringRef getKindName() const { + switch (getKind()) { + case Kind::RegUse: + return "reguse"; + case Kind::RegDef: + return "regdef"; + case Kind::RegDefEarlyClobber: + return "regdef-ec"; + case Kind::Clobber: + return "clobber"; + case Kind::Imm: + return "imm"; + case Kind::Mem: + case Kind::Func: + return "mem"; + } + } - /// Augment an existing flag word returned by getFlagWord with the constraint - /// code for a memory constraint. - static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint) { - assert((isMemKind(InputFlag) || isFuncKind(InputFlag)) && - "InputFlag is not a memory (include function) constraint!"); - assert(Constraint <= 0x7fff && "Too large a memory constraint ID"); - assert(Constraint <= Constraints_Max && "Unknown constraint ID"); - assert((InputFlag & ~0xffff) == 0 && "High bits already contain data"); - return InputFlag | (Constraint << Constraints_ShiftAmount); - } + /// getNumOperandRegisters - Extract the number of registers field from the + /// inline asm operand flag. + unsigned getNumOperandRegisters() const { + return Bitfield::get<NumOperands>(Storage); + } - static unsigned convertMemFlagWordToMatchingFlagWord(unsigned InputFlag) { - assert(isMemKind(InputFlag)); - return InputFlag & ~(0x7fff << Constraints_ShiftAmount); - } + /// isUseOperandTiedToDef - Return true if the flag of the inline asm + /// operand indicates it is an use operand that's matched to a def operand. + bool isUseOperandTiedToDef(unsigned &Idx) const { + if (!isMatched()) + return false; + Idx = getData(); + return true; + } - static Kind getKind(unsigned Flags) { return static_cast<Kind>(Flags & 7); } + /// hasRegClassConstraint - Returns true if the flag contains a register + /// class constraint. Sets RC to the register class ID. + bool hasRegClassConstraint(unsigned &RC) const { + if (isMatched()) + return false; + // setRegClass() uses 0 to mean no register class, and otherwise stores + // RC + 1.
+ if (!getData()) + return false; + RC = getData() - 1; + return true; + } - static unsigned getMemoryConstraintID(unsigned Flag) { - assert((isMemKind(Flag) || isFuncKind(Flag)) && - "Not expected mem or function flang!"); - return (Flag >> Constraints_ShiftAmount) & 0x7fff; - } + // TODO: convert to enum? + unsigned getMemoryConstraintID() const { + assert((isMemKind() || isFuncKind()) && + "Not expected mem or function flag!"); + return getData(); + } - /// getNumOperandRegisters - Extract the number of registers field from the - /// inline asm operand flag. - static unsigned getNumOperandRegisters(unsigned Flag) { - return (Flag & 0xffff) >> 3; - } + /// setMatchingOp - Augment an existing flag with information indicating + /// that this input operand is tied to a previous output operand. + void setMatchingOp(unsigned MatchedOperandNo) { + assert(getData() == 0 && "Matching operand already set"); + setData(MatchedOperandNo); + setIsMatched(true); + } - /// isUseOperandTiedToDef - Return true if the flag of the inline asm - /// operand indicates it is an use operand that's matched to a def operand. - static bool isUseOperandTiedToDef(unsigned Flag, unsigned &Idx) { - if ((Flag & Flag_MatchingOperand) == 0) - return false; - Idx = (Flag & ~Flag_MatchingOperand) >> 16; - return true; - } + /// setRegClass - Augment an existing flag with the required register class + /// for the following register operands. A tied use operand cannot have a + /// register class, use the register class from the def operand instead. + void setRegClass(unsigned RC) { + assert(!isImmKind() && "Immediates cannot have a register class"); + assert(!isMemKind() && "Memory operand cannot have a register class"); + assert(getData() == 0 && "Register class already set"); + // Store RC + 1, reserve the value 0 to mean 'no register class'. + setData(RC + 1); + } - /// hasRegClassConstraint - Returns true if the flag contains a register - /// class constraint. Sets RC to the register class ID. - static bool hasRegClassConstraint(unsigned Flag, unsigned &RC) { - if (Flag & Flag_MatchingOperand) - return false; - unsigned High = Flag >> 16; - // getFlagWordForRegClass() uses 0 to mean no register class, and otherwise - // stores RC + 1. - if (!High) - return false; - RC = High - 1; - return true; - } + /// setMemConstraint - Augment an existing flag with the constraint code for + /// a memory constraint. + void setMemConstraint(unsigned Constraint) { + assert((isMemKind() || isFuncKind()) && + "Flag is not a memory or function constraint!"); + assert(Constraint <= Constraints_Max && "Unknown constraint ID"); + assert(getData() == 0 && "Mem constraint already set"); + setData(Constraint); + } + /// clearMemConstraint - Similar to setMemConstraint(0), but without the + /// assertion checking that the constraint has not been set previously. 
+ void clearMemConstraint() { + assert((isMemKind() || isFuncKind()) && + "Flag is not a memory or function constraint!"); + setData(0); + } + }; static std::vector getExtraInfoNames(unsigned ExtraInfo) { std::vector Result; @@ -412,25 +442,6 @@ class InlineAsm final : public Value { return Result; } - static StringRef getKindName(Kind Kind) { - switch (Kind) { - case Kind::RegUse: - return "reguse"; - case Kind::RegDef: - return "regdef"; - case Kind::RegDefEarlyClobber: - return "regdef-ec"; - case Kind::Clobber: - return "clobber"; - case Kind::Imm: - return "imm"; - case Kind::Mem: - case Kind::Func: - return "mem"; - } - llvm_unreachable("Unknown operand kind"); - } - static StringRef getMemConstraintName(unsigned Constraint) { switch (Constraint) { case InlineAsm::Constraint_es: diff --git a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp index 083d1503330a3..d0ef3e5a19391 100644 --- a/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp +++ b/llvm/lib/CodeGen/AsmPrinter/AsmPrinterInlineAsm.cpp @@ -278,8 +278,8 @@ static void EmitInlineAsmStr(const char *AsmStr, const MachineInstr *MI, for (; Val; --Val) { if (OpNo >= MI->getNumOperands()) break; - unsigned OpFlags = MI->getOperand(OpNo).getImm(); - OpNo += InlineAsm::getNumOperandRegisters(OpFlags) + 1; + const InlineAsm::Flag F(MI->getOperand(OpNo).getImm()); + OpNo += F.getNumOperandRegisters() + 1; } // We may have a location metadata attached to the end of the @@ -288,7 +288,7 @@ static void EmitInlineAsmStr(const char *AsmStr, const MachineInstr *MI, if (OpNo >= MI->getNumOperands() || MI->getOperand(OpNo).isMetadata()) { Error = true; } else { - unsigned OpFlags = MI->getOperand(OpNo).getImm(); + const InlineAsm::Flag F(MI->getOperand(OpNo).getImm()); ++OpNo; // Skip over the ID number. // FIXME: Shouldn't arch-independent output template handling go into @@ -302,7 +302,7 @@ static void EmitInlineAsmStr(const char *AsmStr, const MachineInstr *MI, } else if (MI->getOperand(OpNo).isMBB()) { const MCSymbol *Sym = MI->getOperand(OpNo).getMBB()->getSymbol(); Sym->print(OS, AP->MAI); - } else if (InlineAsm::isMemKind(OpFlags)) { + } else if (F.isMemKind()) { Error = AP->PrintAsmMemoryOperand( MI, OpNo, Modifier[0] ? Modifier : nullptr, OS); } else { @@ -379,14 +379,14 @@ void AsmPrinter::emitInlineAsm(const MachineInstr *MI) const { const MachineOperand &MO = MI->getOperand(I); if (!MO.isImm()) continue; - unsigned Flags = MO.getImm(); - if (InlineAsm::getKind(Flags) == InlineAsm::Kind::Clobber) { + const InlineAsm::Flag F(MO.getImm()); + if (F.isClobberKind()) { Register Reg = MI->getOperand(I + 1).getReg(); if (!TRI->isAsmClobberable(*MF, Reg)) RestrRegs.push_back(Reg); } // Skip to one before the next operand descriptor, if it exists. 
- I += InlineAsm::getNumOperandRegisters(Flags); + I += F.getNumOperandRegisters(); } if (!RestrRegs.empty()) { diff --git a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp index 51c8775deaeb8..9944ba1599768 100644 --- a/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp +++ b/llvm/lib/CodeGen/GlobalISel/InlineAsmLowering.cpp @@ -229,8 +229,8 @@ static void computeConstraintToUse(const TargetLowering *TLI, } static unsigned getNumOpRegs(const MachineInstr &I, unsigned OpIdx) { - unsigned Flag = I.getOperand(OpIdx).getImm(); - return InlineAsm::getNumOperandRegisters(Flag); + const InlineAsm::Flag F(I.getOperand(OpIdx).getImm()); + return F.getNumOperandRegisters(); } static bool buildAnyextOrCopy(Register Dst, Register Src, @@ -380,9 +380,9 @@ bool InlineAsmLowering::lowerInlineAsm( // Add information to the INLINEASM instruction to know about this // output. - unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind::Mem, 1); - OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID); - Inst.addImm(OpFlags); + InlineAsm::Flag Flag(InlineAsm::Kind::Mem, 1); + Flag.setMemConstraint(ConstraintID); + Inst.addImm(Flag); ArrayRef SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal); assert( @@ -405,17 +405,17 @@ bool InlineAsmLowering::lowerInlineAsm( // Add information to the INLINEASM instruction to know that this // register is set. - unsigned Flag = InlineAsm::getFlagWord( - OpInfo.isEarlyClobber ? InlineAsm::Kind::RegDefEarlyClobber - : InlineAsm::Kind::RegDef, - OpInfo.Regs.size()); + InlineAsm::Flag Flag(OpInfo.isEarlyClobber + ? InlineAsm::Kind::RegDefEarlyClobber + : InlineAsm::Kind::RegDef, + OpInfo.Regs.size()); if (OpInfo.Regs.front().isVirtual()) { // Put the register class of the virtual registers in the flag word. // That way, later passes can recompute register class constraints for // inline assembly as well as normal instructions. Don't do this for // tied operands that can use the regclass information from the def. const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front()); - Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID()); + Flag.setRegClass(RC->getID()); } Inst.addImm(Flag); @@ -441,14 +441,13 @@ bool InlineAsmLowering::lowerInlineAsm( InstFlagIdx += getNumOpRegs(*Inst, InstFlagIdx) + 1; assert(getNumOpRegs(*Inst, InstFlagIdx) == 1 && "Wrong flag"); - unsigned MatchedOperandFlag = Inst->getOperand(InstFlagIdx).getImm(); - if (InlineAsm::isMemKind(MatchedOperandFlag)) { + const InlineAsm::Flag MatchedOperandFlag(Inst->getOperand(InstFlagIdx).getImm()); + if (MatchedOperandFlag.isMemKind()) { LLVM_DEBUG(dbgs() << "Matching input constraint to mem operand not " "supported. This should be target specific.\n"); return false; } - if (!InlineAsm::isRegDefKind(MatchedOperandFlag) && - !InlineAsm::isRegDefEarlyClobberKind(MatchedOperandFlag)) { + if (!MatchedOperandFlag.isRegDefKind() && !MatchedOperandFlag.isRegDefEarlyClobberKind()) { LLVM_DEBUG(dbgs() << "Unknown matching constraint\n"); return false; } @@ -470,9 +469,9 @@ bool InlineAsmLowering::lowerInlineAsm( } // Add Flag and input register operand (In) to Inst. Tie In to Def. 
- unsigned UseFlag = InlineAsm::getFlagWord(InlineAsm::Kind::RegUse, 1); - unsigned Flag = InlineAsm::getFlagWordForMatchingOp(UseFlag, DefIdx); - Inst.addImm(Flag); + InlineAsm::Flag UseFlag(InlineAsm::Kind::RegUse, 1); + UseFlag.setMatchingOp(DefIdx); + Inst.addImm(UseFlag); Inst.addReg(In); Inst->tieOperands(DefRegIdx, Inst->getNumOperands() - 1); break; @@ -501,8 +500,8 @@ bool InlineAsmLowering::lowerInlineAsm( "Expected constraint to be lowered to at least one operand"); // Add information to the INLINEASM node to know about this input. - unsigned OpFlags = - InlineAsm::getFlagWord(InlineAsm::Kind::Imm, Ops.size()); + const unsigned OpFlags = + InlineAsm::Flag(InlineAsm::Kind::Imm, Ops.size()); Inst.addImm(OpFlags); Inst.add(Ops); break; @@ -520,8 +519,8 @@ bool InlineAsmLowering::lowerInlineAsm( unsigned ConstraintID = TLI->getInlineAsmMemConstraint(OpInfo.ConstraintCode); - unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind::Mem, 1); - OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID); + InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1); + OpFlags.setMemConstraint(ConstraintID); Inst.addImm(OpFlags); ArrayRef SourceRegs = GetOrCreateVRegs(*OpInfo.CallOperandVal); @@ -563,11 +562,11 @@ bool InlineAsmLowering::lowerInlineAsm( return false; } - unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind::RegUse, NumRegs); + InlineAsm::Flag Flag(InlineAsm::Kind::RegUse, NumRegs); if (OpInfo.Regs.front().isVirtual()) { // Put the register class of the virtual registers in the flag word. const TargetRegisterClass *RC = MRI->getRegClass(OpInfo.Regs.front()); - Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID()); + Flag.setRegClass(RC->getID()); } Inst.addImm(Flag); if (!buildAnyextOrCopy(OpInfo.Regs[0], SourceRegs[0], MIRBuilder)) @@ -578,10 +577,9 @@ bool InlineAsmLowering::lowerInlineAsm( case InlineAsm::isClobber: { - unsigned NumRegs = OpInfo.Regs.size(); + const unsigned NumRegs = OpInfo.Regs.size(); if (NumRegs > 0) { - unsigned Flag = - InlineAsm::getFlagWord(InlineAsm::Kind::Clobber, NumRegs); + unsigned Flag = InlineAsm::Flag(InlineAsm::Kind::Clobber, NumRegs); Inst.addImm(Flag); for (Register Reg : OpInfo.Regs) { diff --git a/llvm/lib/CodeGen/MachineInstr.cpp b/llvm/lib/CodeGen/MachineInstr.cpp index fefae65829919..8cc3391e0d96a 100644 --- a/llvm/lib/CodeGen/MachineInstr.cpp +++ b/llvm/lib/CodeGen/MachineInstr.cpp @@ -845,7 +845,8 @@ int MachineInstr::findInlineAsmFlagIdx(unsigned OpIdx, // If we reach the implicit register operands, stop looking. if (!FlagMO.isImm()) return -1; - NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm()); + const InlineAsm::Flag F(FlagMO.getImm()); + NumOps = 1 + F.getNumOperandRegisters(); if (i + NumOps > OpIdx) { if (GroupNo) *GroupNo = Group; @@ -922,16 +923,14 @@ MachineInstr::getRegClassConstraint(unsigned OpIdx, if (FlagIdx < 0) return nullptr; - unsigned Flag = getOperand(FlagIdx).getImm(); + const InlineAsm::Flag F(getOperand(FlagIdx).getImm()); unsigned RCID; - if ((InlineAsm::getKind(Flag) == InlineAsm::Kind::RegUse || - InlineAsm::getKind(Flag) == InlineAsm::Kind::RegDef || - InlineAsm::getKind(Flag) == InlineAsm::Kind::RegDefEarlyClobber) && - InlineAsm::hasRegClassConstraint(Flag, RCID)) + if ((F.isRegUseKind() || F.isRegDefKind() || F.isRegDefEarlyClobberKind()) && + F.hasRegClassConstraint(RCID)) return TRI->getRegClass(RCID); // Assume that all registers in a memory operand are pointers. 
- if (InlineAsm::getKind(Flag) == InlineAsm::Kind::Mem) + if (F.isMemKind()) return TRI->getPointerRegClass(MF); return nullptr; @@ -1196,12 +1195,13 @@ unsigned MachineInstr::findTiedOperandIdx(unsigned OpIdx) const { assert(FlagMO.isImm() && "Invalid tied operand on inline asm"); unsigned CurGroup = GroupIdx.size(); GroupIdx.push_back(i); - NumOps = 1 + InlineAsm::getNumOperandRegisters(FlagMO.getImm()); + const InlineAsm::Flag F(FlagMO.getImm()); + NumOps = 1 + F.getNumOperandRegisters(); // OpIdx belongs to this operand group. if (OpIdx > i && OpIdx < i + NumOps) OpIdxGroup = CurGroup; unsigned TiedGroup; - if (!InlineAsm::isUseOperandTiedToDef(FlagMO.getImm(), TiedGroup)) + if (!F.isUseOperandTiedToDef(TiedGroup)) continue; // Operands in this group are tied to operands in TiedGroup which must be // earlier. Find the number of operands between the two groups. @@ -1765,31 +1765,31 @@ void MachineInstr::print(raw_ostream &OS, ModuleSlotTracker &MST, // Pretty print the inline asm operand descriptor. OS << '$' << AsmOpCount++; unsigned Flag = MO.getImm(); + const InlineAsm::Flag F(Flag); OS << ":["; - OS << InlineAsm::getKindName(InlineAsm::getKind(Flag)); + OS << F.getKindName(); - unsigned RCID = 0; - if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) && - InlineAsm::hasRegClassConstraint(Flag, RCID)) { + unsigned RCID; + if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) { if (TRI) { OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID)); } else OS << ":RC" << RCID; } - if (InlineAsm::isMemKind(Flag)) { - unsigned MCID = InlineAsm::getMemoryConstraintID(Flag); + if (F.isMemKind()) { + const unsigned MCID = F.getMemoryConstraintID(); OS << ":" << InlineAsm::getMemConstraintName(MCID); } - unsigned TiedTo = 0; - if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo)) + unsigned TiedTo; + if (F.isUseOperandTiedToDef(TiedTo)) OS << " tiedto:$" << TiedTo; OS << ']'; // Compute the index of the next operand descriptor. - AsmDescOp += 1 + InlineAsm::getNumOperandRegisters(Flag); + AsmDescOp += 1 + F.getNumOperandRegisters(); } else { LLT TypeToPrint = MRI ? getTypeToPrint(i, PrintedTypes, *MRI) : LLT{}; unsigned TiedOperandIdx = getTiedOperandIdx(i); diff --git a/llvm/lib/CodeGen/MachineVerifier.cpp b/llvm/lib/CodeGen/MachineVerifier.cpp index cfca2d4ba6798..de6d78027d098 100644 --- a/llvm/lib/CodeGen/MachineVerifier.cpp +++ b/llvm/lib/CodeGen/MachineVerifier.cpp @@ -888,7 +888,8 @@ void MachineVerifier::verifyInlineAsm(const MachineInstr *MI) { // There may be implicit ops after the fixed operands. if (!MO.isImm()) break; - NumOps = 1 + InlineAsm::getNumOperandRegisters(MO.getImm()); + const InlineAsm::Flag F(MO.getImm()); + NumOps = 1 + F.getNumOperandRegisters(); } if (OpNo > MI->getNumOperands()) diff --git a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp index 1d4be4df8ec08..a27febe15db83 100644 --- a/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/InstrEmitter.cpp @@ -1311,13 +1311,14 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { unsigned Flags = cast(Node->getOperand(i))->getZExtValue(); - const unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + const InlineAsm::Flag F(Flags); + const unsigned NumVals = F.getNumOperandRegisters(); GroupIdx.push_back(MIB->getNumOperands()); MIB.addImm(Flags); ++i; // Skip the ID value. 
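For context (not part of the patch): most of the call sites updated in this change share the same flag-driven walk over INLINEASM operand groups, where each group is one flag immediate followed by its register operands. A minimal sketch against the new API follows; the helper name countInlineAsmRegDefs is made up for illustration.

#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/IR/InlineAsm.h"

static unsigned countInlineAsmRegDefs(const llvm::MachineInstr &MI) {
  unsigned Defs = 0;
  unsigned OpNo = llvm::InlineAsm::MIOp_FirstOperand;
  while (OpNo < MI.getNumOperands() && MI.getOperand(OpNo).isImm()) {
    const llvm::InlineAsm::Flag F(MI.getOperand(OpNo).getImm());
    if (F.isRegDefKind() || F.isRegDefEarlyClobberKind())
      ++Defs;
    // Skip the flag immediate plus the operands belonging to this group.
    OpNo += 1 + F.getNumOperandRegisters();
  }
  return Defs;
}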
- switch (InlineAsm::getKind(Flags)) { + switch (F.getKind()) { case InlineAsm::Kind::RegDef: for (unsigned j = 0; j != NumVals; ++j, ++i) { Register Reg = cast(Node->getOperand(i))->getReg(); @@ -1346,9 +1347,9 @@ EmitSpecialNode(SDNode *Node, bool IsClone, bool IsCloned, /*IsDebug=*/false, IsClone, IsCloned); // Manually set isTied bits. - if (InlineAsm::getKind(Flags) == InlineAsm::Kind::RegUse) { - unsigned DefGroup = 0; - if (InlineAsm::isUseOperandTiedToDef(Flags, DefGroup)) { + if (F.isRegUseKind()) { + unsigned DefGroup; + if (F.isUseOperandTiedToDef(DefGroup)) { unsigned DefIdx = GroupIdx[DefGroup] + 1; unsigned UseIdx = GroupIdx.back() + 1; for (unsigned j = 0; j != NumVals; ++j) diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp index 5b01743d23e0a..5abfe16839ee7 100644 --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGFast.cpp @@ -498,12 +498,12 @@ bool ScheduleDAGFast::DelayForLiveRegsBottomUp(SUnit *SU, for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { unsigned Flags = cast(Node->getOperand(i))->getZExtValue(); - unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + const InlineAsm::Flag F(Flags); + unsigned NumVals = F.getNumOperandRegisters(); ++i; // Skip the ID value. - if (InlineAsm::isRegDefKind(Flags) || - InlineAsm::isRegDefEarlyClobberKind(Flags) || - InlineAsm::isClobberKind(Flags)) { + if (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || + F.isClobberKind()) { // Check for def of register or earlyclobber register. for (; NumVals; --NumVals, ++i) { unsigned Reg = cast(Node->getOperand(i))->getReg(); diff --git a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp index 458f50c54824e..4ca3385ef501f 100644 --- a/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp @@ -1377,12 +1377,12 @@ DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl &LRegs) { for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { unsigned Flags = cast(Node->getOperand(i))->getZExtValue(); - unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + const InlineAsm::Flag F(Flags); + unsigned NumVals = F.getNumOperandRegisters(); ++i; // Skip the ID value. - if (InlineAsm::isRegDefKind(Flags) || - InlineAsm::isRegDefEarlyClobberKind(Flags) || - InlineAsm::isClobberKind(Flags)) { + if (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || + F.isClobberKind()) { // Check for def of register or earlyclobber register. for (; NumVals; --NumVals, ++i) { Register Reg = cast(Node->getOperand(i))->getReg(); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 5a227ba398e1c..738dd10633db6 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -995,9 +995,9 @@ void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, std::vector &Ops) const { const TargetLowering &TLI = DAG.getTargetLoweringInfo(); - unsigned Flag = InlineAsm::getFlagWord(Code, Regs.size()); + InlineAsm::Flag Flag(Code, Regs.size()); if (HasMatching) - Flag = InlineAsm::getFlagWordForMatchingOp(Flag, MatchingIdx); + Flag.setMatchingOp(MatchingIdx); else if (!Regs.empty() && Register::isVirtualRegister(Regs.front())) { // Put the register class of the virtual registers in the flag word. 
That // way, later passes can recompute register class constraints for inline @@ -1006,7 +1006,7 @@ void RegsForValue::AddInlineAsmOperands(InlineAsm::Kind Code, bool HasMatching, // from the def. const MachineRegisterInfo &MRI = DAG.getMachineFunction().getRegInfo(); const TargetRegisterClass *RC = MRI.getRegClass(Regs.front()); - Flag = InlineAsm::getFlagWordForRegClass(Flag, RC->getID()); + Flag.setRegClass(RC->getID()); } SDValue Res = DAG.getTargetConstant(Flag, dl, MVT::i32); @@ -9019,11 +9019,11 @@ findMatchingInlineAsmOperand(unsigned OperandNo, // Advance to the next operand. unsigned OpFlag = cast(AsmNodeOperands[CurOp])->getZExtValue(); - assert((InlineAsm::isRegDefKind(OpFlag) || - InlineAsm::isRegDefEarlyClobberKind(OpFlag) || - InlineAsm::isMemKind(OpFlag)) && - "Skipped past definitions?"); - CurOp += InlineAsm::getNumOperandRegisters(OpFlag) + 1; + const InlineAsm::Flag F(OpFlag); + assert( + (F.isRegDefKind() || F.isRegDefEarlyClobberKind() || F.isMemKind()) && + "Skipped past definitions?"); + CurOp += F.getNumOperandRegisters() + 1; } return CurOp; } @@ -9287,8 +9287,8 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, "Failed to convert memory constraint code to constraint id."); // Add information to the INLINEASM node to know about this output. - unsigned OpFlags = InlineAsm::getFlagWord(InlineAsm::Kind::Mem, 1); - OpFlags = InlineAsm::getFlagWordForMem(OpFlags, ConstraintID); + InlineAsm::Flag OpFlags(InlineAsm::Kind::Mem, 1); + OpFlags.setMemConstraint(ConstraintID); AsmNodeOperands.push_back(DAG.getTargetConstant(OpFlags, getCurSDLoc(), MVT::i32)); AsmNodeOperands.push_back(OpInfo.CallOperand); @@ -9324,11 +9324,9 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, // just use its register. auto CurOp = findMatchingInlineAsmOperand(OpInfo.getMatchedOperand(), AsmNodeOperands); - unsigned OpFlag = - cast(AsmNodeOperands[CurOp])->getZExtValue(); - if (InlineAsm::isRegDefKind(OpFlag) || - InlineAsm::isRegDefEarlyClobberKind(OpFlag)) { - // Add (OpFlag&0xffff)>>3 registers to MatchedRegs. + InlineAsm::Flag Flag( + cast(AsmNodeOperands[CurOp])->getZExtValue()); + if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) { if (OpInfo.isIndirect) { // This happens on gcc/testsuite/gcc.dg/pr8788-1.c emitInlineAsmError(Call, "inline asm not supported yet: " @@ -9348,8 +9346,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, TiedReg.isVirtual() ? MRI.getRegClass(TiedReg) : RegVT != MVT::Untyped ? TLI.getRegClassFor(RegVT) : TRI.getMinimalPhysRegClass(TiedReg); - unsigned NumRegs = InlineAsm::getNumOperandRegisters(OpFlag); - for (unsigned i = 0; i != NumRegs; ++i) + for (unsigned i = 0, e = Flag.getNumOperandRegisters(); i != e; ++i) Regs.push_back(MRI.createVirtualRegister(RC)); RegsForValue MatchedRegs(Regs, RegVT, InOperandVal.getValueType()); @@ -9363,16 +9360,15 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, break; } - assert(InlineAsm::isMemKind(OpFlag) && "Unknown matching constraint!"); - assert(InlineAsm::getNumOperandRegisters(OpFlag) == 1 && + assert(Flag.isMemKind() && "Unknown matching constraint!"); + assert(Flag.getNumOperandRegisters() == 1 && "Unexpected number of operands"); // Add information to the INLINEASM node to know about this input. // See InlineAsm.h isUseOperandTiedToDef. 
- OpFlag = InlineAsm::convertMemFlagWordToMatchingFlagWord(OpFlag); - OpFlag = InlineAsm::getFlagWordForMatchingOp(OpFlag, - OpInfo.getMatchedOperand()); + Flag.clearMemConstraint(); + Flag.setMatchingOp(OpInfo.getMatchedOperand()); AsmNodeOperands.push_back(DAG.getTargetConstant( - OpFlag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout()))); + Flag, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout()))); AsmNodeOperands.push_back(AsmNodeOperands[CurOp+1]); break; } @@ -9402,8 +9398,7 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, } // Add information to the INLINEASM node to know about this input. - unsigned ResOpType = - InlineAsm::getFlagWord(InlineAsm::Kind::Imm, Ops.size()); + InlineAsm::Flag ResOpType(InlineAsm::Kind::Imm, Ops.size()); AsmNodeOperands.push_back(DAG.getTargetConstant( ResOpType, getCurSDLoc(), TLI.getPointerTy(DAG.getDataLayout()))); llvm::append_range(AsmNodeOperands, Ops); @@ -9424,8 +9419,8 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, "Failed to convert memory constraint code to constraint id."); // Add information to the INLINEASM node to know about this input. - unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind::Mem, 1); - ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID); + InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1); + ResOpType.setMemConstraint(ConstraintID); AsmNodeOperands.push_back(DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32)); @@ -9439,19 +9434,19 @@ void SelectionDAGBuilder::visitInlineAsm(const CallBase &Call, assert(ConstraintID != InlineAsm::Constraint_Unknown && "Failed to convert memory constraint code to constraint id."); - unsigned ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind::Mem, 1); + InlineAsm::Flag ResOpType(InlineAsm::Kind::Mem, 1); SDValue AsmOp = InOperandVal; if (isFunction(InOperandVal)) { auto *GA = cast(InOperandVal); - ResOpType = InlineAsm::getFlagWord(InlineAsm::Kind::Func, 1); + ResOpType = InlineAsm::Flag(InlineAsm::Kind::Func, 1); AsmOp = DAG.getTargetGlobalAddress(GA->getGlobal(), getCurSDLoc(), InOperandVal.getValueType(), GA->getOffset()); } // Add information to the INLINEASM node to know about this input. - ResOpType = InlineAsm::getFlagWordForMem(ResOpType, ConstraintID); + ResOpType.setMemConstraint(ConstraintID); AsmNodeOperands.push_back( DAG.getTargetConstant(ResOpType, getCurSDLoc(), MVT::i32)); diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp index d3456d574666d..a7873241df62e 100644 --- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp +++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGISel.cpp @@ -2076,41 +2076,42 @@ void SelectionDAGISel::SelectInlineAsmMemoryOperands(std::vector &Ops, --e; // Don't process a glue operand if it is here. while (i != e) { - unsigned Flags = cast(InOps[i])->getZExtValue(); - if (!InlineAsm::isMemKind(Flags) && !InlineAsm::isFuncKind(Flags)) { + InlineAsm::Flag Flags(cast(InOps[i])->getZExtValue()); + if (!Flags.isMemKind() && !Flags.isFuncKind()) { // Just skip over this operand, copying the operands verbatim. 
- Ops.insert(Ops.end(), InOps.begin()+i, - InOps.begin()+i+InlineAsm::getNumOperandRegisters(Flags) + 1); - i += InlineAsm::getNumOperandRegisters(Flags) + 1; + Ops.insert(Ops.end(), InOps.begin() + i, + InOps.begin() + i + Flags.getNumOperandRegisters() + 1); + i += Flags.getNumOperandRegisters() + 1; } else { - assert(InlineAsm::getNumOperandRegisters(Flags) == 1 && + assert(Flags.getNumOperandRegisters() == 1 && "Memory operand with multiple values?"); unsigned TiedToOperand; - if (InlineAsm::isUseOperandTiedToDef(Flags, TiedToOperand)) { + if (Flags.isUseOperandTiedToDef(TiedToOperand)) { // We need the constraint ID from the operand this is tied to. unsigned CurOp = InlineAsm::Op_FirstOperand; - Flags = cast(InOps[CurOp])->getZExtValue(); + Flags = + InlineAsm::Flag(cast(InOps[CurOp])->getZExtValue()); for (; TiedToOperand; --TiedToOperand) { - CurOp += InlineAsm::getNumOperandRegisters(Flags)+1; - Flags = cast(InOps[CurOp])->getZExtValue(); + CurOp += Flags.getNumOperandRegisters() + 1; + Flags = InlineAsm::Flag( + cast(InOps[CurOp])->getZExtValue()); } } // Otherwise, this is a memory operand. Ask the target to select it. std::vector SelOps; - unsigned ConstraintID = InlineAsm::getMemoryConstraintID(Flags); + unsigned ConstraintID = Flags.getMemoryConstraintID(); if (SelectInlineAsmMemoryOperand(InOps[i+1], ConstraintID, SelOps)) report_fatal_error("Could not match memory address. Inline asm" " failure!"); // Add this to the output node. - unsigned NewFlags = - InlineAsm::isMemKind(Flags) - ? InlineAsm::getFlagWord(InlineAsm::Kind::Mem, SelOps.size()) - : InlineAsm::getFlagWord(InlineAsm::Kind::Func, SelOps.size()); - NewFlags = InlineAsm::getFlagWordForMem(NewFlags, ConstraintID); - Ops.push_back(CurDAG->getTargetConstant(NewFlags, DL, MVT::i32)); + Flags = InlineAsm::Flag(Flags.isMemKind() ? InlineAsm::Kind::Mem + : InlineAsm::Kind::Func, + SelOps.size()); + Flags.setMemConstraint(ConstraintID); + Ops.push_back(CurDAG->getTargetConstant(Flags, DL, MVT::i32)); llvm::append_range(Ops, SelOps); i += 2; } diff --git a/llvm/lib/CodeGen/TargetInstrInfo.cpp b/llvm/lib/CodeGen/TargetInstrInfo.cpp index 16b937cd1f684..686044ea572ac 100644 --- a/llvm/lib/CodeGen/TargetInstrInfo.cpp +++ b/llvm/lib/CodeGen/TargetInstrInfo.cpp @@ -1610,25 +1610,24 @@ std::string TargetInstrInfo::createMIROperandComment( assert(Op.isImm() && "Expected flag operand to be an immediate"); // Pretty print the inline asm operand descriptor. 
unsigned Flag = Op.getImm(); - InlineAsm::Kind Kind = InlineAsm::getKind(Flag); - OS << InlineAsm::getKindName(Kind); + const InlineAsm::Flag F(Flag); + OS << F.getKindName(); - unsigned RCID = 0; - if (!InlineAsm::isImmKind(Flag) && !InlineAsm::isMemKind(Flag) && - InlineAsm::hasRegClassConstraint(Flag, RCID)) { + unsigned RCID; + if (!F.isImmKind() && !F.isMemKind() && F.hasRegClassConstraint(RCID)) { if (TRI) { OS << ':' << TRI->getRegClassName(TRI->getRegClass(RCID)); } else OS << ":RC" << RCID; } - if (InlineAsm::isMemKind(Flag)) { - unsigned MCID = InlineAsm::getMemoryConstraintID(Flag); + if (F.isMemKind()) { + const unsigned MCID = F.getMemoryConstraintID(); OS << ":" << InlineAsm::getMemConstraintName(MCID); } - unsigned TiedTo = 0; - if (InlineAsm::isUseOperandTiedToDef(Flag, TiedTo)) + unsigned TiedTo; + if (F.isUseOperandTiedToDef(TiedTo)) OS << " tiedto:$" << TiedTo; return OS.str(); diff --git a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp index c2aff4687d3b1..41dee3e2c52af 100644 --- a/llvm/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/llvm/lib/Target/ARM/ARMAsmPrinter.cpp @@ -361,25 +361,26 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, const MachineOperand &FlagsOP = MI->getOperand(OpNum - 1); if (!FlagsOP.isImm()) return true; - unsigned Flags = FlagsOP.getImm(); + InlineAsm::Flag F(FlagsOP.getImm()); // This operand may not be the one that actually provides the register. If // it's tied to a previous one then we should refer instead to that one // for registers and their classes. unsigned TiedIdx; - if (InlineAsm::isUseOperandTiedToDef(Flags, TiedIdx)) { + if (F.isUseOperandTiedToDef(TiedIdx)) { for (OpNum = InlineAsm::MIOp_FirstOperand; TiedIdx; --TiedIdx) { unsigned OpFlags = MI->getOperand(OpNum).getImm(); - OpNum += InlineAsm::getNumOperandRegisters(OpFlags) + 1; + const InlineAsm::Flag F(OpFlags); + OpNum += F.getNumOperandRegisters() + 1; } - Flags = MI->getOperand(OpNum).getImm(); + F = InlineAsm::Flag(MI->getOperand(OpNum).getImm()); // Later code expects OpNum to be pointing at the register rather than // the flags. OpNum += 1; } - unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + const unsigned NumVals = F.getNumOperandRegisters(); unsigned RC; bool FirstHalf; const ARMBaseTargetMachine &ATM = @@ -394,7 +395,7 @@ bool ARMAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, // ExtraCode[0] == 'R'. 
FirstHalf = !ATM.isLittleEndian(); const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo(); - if (InlineAsm::hasRegClassConstraint(Flags, RC) && + if (F.hasRegClassConstraint(RC) && ARM::GPRPairRegClass.hasSubClassEq(TRI->getRegClass(RC))) { if (NumVals != 1) return true; diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp index 41fde57b03129..5f4fab0675824 100644 --- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp +++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp @@ -5707,8 +5707,7 @@ bool ARMDAGToDAGISel::tryWriteRegister(SDNode *N){ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){ std::vector AsmNodeOperands; - unsigned Flag; - InlineAsm::Kind Kind; + InlineAsm::Flag Flag; bool Changed = false; unsigned NumOps = N->getNumOperands(); @@ -5732,10 +5731,8 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){ if (i < InlineAsm::Op_FirstOperand) continue; - if (ConstantSDNode *C = dyn_cast(N->getOperand(i))) { - Flag = C->getZExtValue(); - Kind = InlineAsm::getKind(Flag); - } + if (const auto *C = dyn_cast(N->getOperand(i))) + Flag = InlineAsm::Flag(C->getZExtValue()); else continue; @@ -5743,13 +5740,13 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){ // two operands. The first is a constant of value InlineAsm::Kind::Imm, and // the second is a constant with the value of the immediate. If we get here // and we have a Kind::Imm, skip the next operand, and continue. - if (Kind == InlineAsm::Kind::Imm) { + if (Flag.isImmKind()) { SDValue op = N->getOperand(++i); AsmNodeOperands.push_back(op); continue; } - unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag); + const unsigned NumRegs = Flag.getNumOperandRegisters(); if (NumRegs) OpChanged.push_back(false); @@ -5757,7 +5754,7 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){ bool IsTiedToChangedOp = false; // If it's a use that is tied with a previous def, it has no // reg class constraint. - if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx)) + if (Changed && Flag.isUseOperandTiedToDef(DefIdx)) IsTiedToChangedOp = OpChanged[DefIdx]; // Memory operands to inline asm in the SelectionDAG are modeled with two @@ -5765,18 +5762,18 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){ // operand. If we get here and we have a Kind::Mem, skip the next operand // (so it doesn't get misinterpreted), and continue. We do this here because // it's important to update the OpChanged array correctly before moving on. - if (Kind == InlineAsm::Kind::Mem) { + if (Flag.isMemKind()) { SDValue op = N->getOperand(++i); AsmNodeOperands.push_back(op); continue; } - if (Kind != InlineAsm::Kind::RegUse && Kind != InlineAsm::Kind::RegDef && - Kind != InlineAsm::Kind::RegDefEarlyClobber) + if (!Flag.isRegUseKind() && !Flag.isRegDefKind() && + !Flag.isRegDefEarlyClobberKind()) continue; unsigned RC; - bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC); + const bool HasRC = Flag.hasRegClassConstraint(RC); if ((!IsTiedToChangedOp && (!HasRC || RC != ARM::GPRRegClassID)) || NumRegs != 2) continue; @@ -5789,8 +5786,7 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){ SDValue PairedReg; MachineRegisterInfo &MRI = MF->getRegInfo(); - if (Kind == InlineAsm::Kind::RegDef || - Kind == InlineAsm::Kind::RegDefEarlyClobber) { + if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) { // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to // the original GPRs. 
@@ -5841,11 +5837,11 @@ bool ARMDAGToDAGISel::tryInlineAsm(SDNode *N){ if(PairedReg.getNode()) { OpChanged[OpChanged.size() -1 ] = true; - Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/); + Flag = InlineAsm::Flag(Flag.getKind(), 1 /* RegNum*/); if (IsTiedToChangedOp) - Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx); + Flag.setMatchingOp(DefIdx); else - Flag = InlineAsm::getFlagWordForRegClass(Flag, ARM::GPRPairRegClassID); + Flag.setRegClass(ARM::GPRPairRegClassID); // Replace the current flag. AsmNodeOperands[AsmNodeOperands.size() -1] = CurDAG->getTargetConstant( Flag, dl, MVT::i32); diff --git a/llvm/lib/Target/AVR/AVRAsmPrinter.cpp b/llvm/lib/Target/AVR/AVRAsmPrinter.cpp index ceee44ec0f202..abe5dcc483409 100644 --- a/llvm/lib/Target/AVR/AVRAsmPrinter.cpp +++ b/llvm/lib/Target/AVR/AVRAsmPrinter.cpp @@ -118,8 +118,8 @@ bool AVRAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, Register Reg = MO.getReg(); unsigned ByteNumber = ExtraCode[0] - 'A'; - unsigned OpFlags = MI->getOperand(OpNum - 1).getImm(); - unsigned NumOpRegs = InlineAsm::getNumOperandRegisters(OpFlags); + const InlineAsm::Flag OpFlags(MI->getOperand(OpNum - 1).getImm()); + const unsigned NumOpRegs = OpFlags.getNumOperandRegisters(); const AVRSubtarget &STI = MF->getSubtarget(); const TargetRegisterInfo &TRI = *STI.getRegisterInfo(); @@ -176,8 +176,8 @@ bool AVRAsmPrinter::PrintAsmMemoryOperand(const MachineInstr *MI, // If NumOpRegs == 2, then we assume it is product of a FrameIndex expansion // and the second operand is an Imm. - unsigned OpFlags = MI->getOperand(OpNum - 1).getImm(); - unsigned NumOpRegs = InlineAsm::getNumOperandRegisters(OpFlags); + const InlineAsm::Flag OpFlags(MI->getOperand(OpNum - 1).getImm()); + const unsigned NumOpRegs = OpFlags.getNumOperandRegisters(); if (NumOpRegs == 2) { assert(MI->getOperand(OpNum).getReg() != AVR::R27R26 && diff --git a/llvm/lib/Target/AVR/AVRISelLowering.cpp b/llvm/lib/Target/AVR/AVRISelLowering.cpp index 9bcceee8fb3bc..2b73704090851 100644 --- a/llvm/lib/Target/AVR/AVRISelLowering.cpp +++ b/llvm/lib/Target/AVR/AVRISelLowering.cpp @@ -976,7 +976,7 @@ SDValue AVRTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const { Ops.push_back(Operand); } } - unsigned Flags = InlineAsm::getFlagWord(InlineAsm::Kind::RegUse, 1); + InlineAsm::Flag Flags(InlineAsm::Kind::RegUse, 1); Ops.push_back(DAG.getTargetConstant(Flags, dl, MVT::i32)); Ops.push_back(ZeroReg); if (Glue) { diff --git a/llvm/lib/Target/CSKY/CSKYISelDAGToDAG.cpp b/llvm/lib/Target/CSKY/CSKYISelDAGToDAG.cpp index 6bb65ba52ce3f..32fa4ea1e302b 100644 --- a/llvm/lib/Target/CSKY/CSKYISelDAGToDAG.cpp +++ b/llvm/lib/Target/CSKY/CSKYISelDAGToDAG.cpp @@ -116,8 +116,7 @@ void CSKYDAGToDAGISel::Select(SDNode *N) { bool CSKYDAGToDAGISel::selectInlineAsm(SDNode *N) { std::vector AsmNodeOperands; - unsigned Flag; - InlineAsm::Kind Kind; + InlineAsm::Flag Flag; bool Changed = false; unsigned NumOps = N->getNumOperands(); @@ -140,23 +139,22 @@ bool CSKYDAGToDAGISel::selectInlineAsm(SDNode *N) { if (i < InlineAsm::Op_FirstOperand) continue; - if (ConstantSDNode *C = dyn_cast(N->getOperand(i))) { - Flag = C->getZExtValue(); - Kind = InlineAsm::getKind(Flag); - } else + if (const auto *C = dyn_cast(N->getOperand(i))) + Flag = InlineAsm::Flag(C->getZExtValue()); + else continue; // Immediate operands to inline asm in the SelectionDAG are modeled with // two operands. The first is a constant of value InlineAsm::Kind::Imm, and // the second is a constant with the value of the immediate. 
If we get here // and we have a Kind::Imm, skip the next operand, and continue. - if (Kind == InlineAsm::Kind::Imm) { + if (Flag.isImmKind()) { SDValue op = N->getOperand(++i); AsmNodeOperands.push_back(op); continue; } - unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag); + const unsigned NumRegs = Flag.getNumOperandRegisters(); if (NumRegs) OpChanged.push_back(false); @@ -164,7 +162,7 @@ bool CSKYDAGToDAGISel::selectInlineAsm(SDNode *N) { bool IsTiedToChangedOp = false; // If it's a use that is tied with a previous def, it has no // reg class constraint. - if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx)) + if (Changed && Flag.isUseOperandTiedToDef(DefIdx)) IsTiedToChangedOp = OpChanged[DefIdx]; // Memory operands to inline asm in the SelectionDAG are modeled with two @@ -172,18 +170,18 @@ bool CSKYDAGToDAGISel::selectInlineAsm(SDNode *N) { // operand. If we get here and we have a Kind::Mem, skip the next operand // (so it doesn't get misinterpreted), and continue. We do this here because // it's important to update the OpChanged array correctly before moving on. - if (Kind == InlineAsm::Kind::Mem) { + if (Flag.isMemKind()) { SDValue op = N->getOperand(++i); AsmNodeOperands.push_back(op); continue; } - if (Kind != InlineAsm::Kind::RegUse && Kind != InlineAsm::Kind::RegDef && - Kind != InlineAsm::Kind::RegDefEarlyClobber) + if (!Flag.isRegUseKind() && !Flag.isRegDefKind() && + !Flag.isRegDefEarlyClobberKind()) continue; unsigned RC; - bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC); + const bool HasRC = Flag.hasRegClassConstraint(RC); if ((!IsTiedToChangedOp && (!HasRC || RC != CSKY::GPRRegClassID)) || NumRegs != 2) continue; @@ -196,8 +194,7 @@ bool CSKYDAGToDAGISel::selectInlineAsm(SDNode *N) { SDValue PairedReg; MachineRegisterInfo &MRI = MF->getRegInfo(); - if (Kind == InlineAsm::Kind::RegDef || - Kind == InlineAsm::Kind::RegDefEarlyClobber) { + if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) { // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to // the original GPRs. @@ -248,11 +245,12 @@ bool CSKYDAGToDAGISel::selectInlineAsm(SDNode *N) { if (PairedReg.getNode()) { OpChanged[OpChanged.size() - 1] = true; - Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/); + // TODO: maybe a setter for getNumOperandRegisters? + Flag = InlineAsm::Flag(Flag.getKind(), 1 /* RegNum*/); if (IsTiedToChangedOp) - Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx); + Flag.setMatchingOp(DefIdx); else - Flag = InlineAsm::getFlagWordForRegClass(Flag, CSKY::GPRPairRegClassID); + Flag.setRegClass(CSKY::GPRPairRegClassID); // Replace the current flag. AsmNodeOperands[AsmNodeOperands.size() - 1] = CurDAG->getTargetConstant(Flag, dl, MVT::i32); diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp index 75a3393705911..e950b44341c92 100644 --- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp +++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp @@ -669,31 +669,32 @@ HexagonTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const { --NumOps; // Ignore the flag operand. for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { - unsigned Flags = cast(Op.getOperand(i))->getZExtValue(); - unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + const InlineAsm::Flag Flags( + cast(Op.getOperand(i))->getZExtValue()); + unsigned NumVals = Flags.getNumOperandRegisters(); ++i; // Skip the ID value. 
- switch (InlineAsm::getKind(Flags)) { - default: - llvm_unreachable("Bad flags!"); - case InlineAsm::Kind::RegUse: - case InlineAsm::Kind::Imm: - case InlineAsm::Kind::Mem: - i += NumVals; - break; - case InlineAsm::Kind::Clobber: - case InlineAsm::Kind::RegDef: - case InlineAsm::Kind::RegDefEarlyClobber: { - for (; NumVals; --NumVals, ++i) { - Register Reg = cast(Op.getOperand(i))->getReg(); - if (Reg != LR) - continue; - HMFI.setHasClobberLR(true); - return Op; - } - break; + switch (Flags.getKind()) { + default: + llvm_unreachable("Bad flags!"); + case InlineAsm::Kind::RegUse: + case InlineAsm::Kind::Imm: + case InlineAsm::Kind::Mem: + i += NumVals; + break; + case InlineAsm::Kind::Clobber: + case InlineAsm::Kind::RegDef: + case InlineAsm::Kind::RegDefEarlyClobber: { + for (; NumVals; --NumVals, ++i) { + Register Reg = cast(Op.getOperand(i))->getReg(); + if (Reg != LR) + continue; + HMFI.setHasClobberLR(true); + return Op; + } + break; + } } - } } return Op; diff --git a/llvm/lib/Target/Lanai/LanaiAsmPrinter.cpp b/llvm/lib/Target/Lanai/LanaiAsmPrinter.cpp index d142fd3a414fc..c66d9166828c1 100644 --- a/llvm/lib/Target/Lanai/LanaiAsmPrinter.cpp +++ b/llvm/lib/Target/Lanai/LanaiAsmPrinter.cpp @@ -123,8 +123,8 @@ bool LanaiAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNo, const MachineOperand &FlagsOP = MI->getOperand(OpNo - 1); if (!FlagsOP.isImm()) return true; - unsigned Flags = FlagsOP.getImm(); - unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + const InlineAsm::Flag Flags(FlagsOP.getImm()); + const unsigned NumVals = Flags.getNumOperandRegisters(); if (NumVals != 2) return true; unsigned RegOp = OpNo + 1; diff --git a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp index 26df40e3b13c6..30ff82dd911cf 100644 --- a/llvm/lib/Target/Mips/MipsAsmPrinter.cpp +++ b/llvm/lib/Target/Mips/MipsAsmPrinter.cpp @@ -569,8 +569,8 @@ bool MipsAsmPrinter::PrintAsmOperand(const MachineInstr *MI, unsigned OpNum, const MachineOperand &FlagsOP = MI->getOperand(OpNum - 1); if (!FlagsOP.isImm()) return true; - unsigned Flags = FlagsOP.getImm(); - unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + const InlineAsm::Flag Flags(FlagsOP.getImm()); + const unsigned NumVals = Flags.getNumOperandRegisters(); // Number of registers represented by this operand. We are looking // for 2 for 32 bit mode and 1 for 64 bit mode. if (NumVals != 2) { diff --git a/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp b/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp index c86666cc40b60..2683d92c1f2ea 100644 --- a/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp +++ b/llvm/lib/Target/Mips/MipsSERegisterInfo.cpp @@ -101,8 +101,8 @@ static inline unsigned getLoadStoreOffsetSizeInBits(const unsigned Opcode, case Mips::SC_MMR6: return 9; case Mips::INLINEASM: { - unsigned ConstraintID = InlineAsm::getMemoryConstraintID(MO.getImm()); - switch (ConstraintID) { + const InlineAsm::Flag F(MO.getImm()); + switch (F.getMemoryConstraintID()) { case InlineAsm::Constraint_ZC: { const MipsSubtarget &Subtarget = MO.getParent() ->getParent() diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp index 6bc89891c0dc4..242e4e45dcd82 100644 --- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp @@ -3793,11 +3793,12 @@ SDValue PPCTargetLowering::LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const { // Check all operands that may contain the LR. 
for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) { - unsigned Flags = cast(Op.getOperand(i))->getZExtValue(); - unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags); + const InlineAsm::Flag Flags( + cast(Op.getOperand(i))->getZExtValue()); + unsigned NumVals = Flags.getNumOperandRegisters(); ++i; // Skip the ID value. - switch (InlineAsm::getKind(Flags)) { + switch (Flags.getKind()) { default: llvm_unreachable("Bad flags!"); case InlineAsm::Kind::RegUse: diff --git a/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp b/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp index 14cdc1d71f86f..8df63d8d23147 100644 --- a/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp +++ b/llvm/lib/Target/Sparc/SparcISelDAGToDAG.cpp @@ -162,8 +162,7 @@ bool SparcDAGToDAGISel::SelectADDRrr(SDValue Addr, SDValue &R1, SDValue &R2) { // and have that work. Then, delete this function. bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){ std::vector AsmNodeOperands; - unsigned Flag; - InlineAsm::Kind Kind; + InlineAsm::Flag Flag; bool Changed = false; unsigned NumOps = N->getNumOperands(); @@ -187,10 +186,8 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){ if (i < InlineAsm::Op_FirstOperand) continue; - if (ConstantSDNode *C = dyn_cast(N->getOperand(i))) { - Flag = C->getZExtValue(); - Kind = InlineAsm::getKind(Flag); - } + if (const auto *C = dyn_cast(N->getOperand(i))) + Flag = InlineAsm::Flag(C->getZExtValue()); else continue; @@ -198,13 +195,13 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){ // two operands. The first is a constant of value InlineAsm::Kind::Imm, and // the second is a constant with the value of the immediate. If we get here // and we have a Kind::Imm, skip the next operand, and continue. - if (Kind == InlineAsm::Kind::Imm) { + if (Flag.isImmKind()) { SDValue op = N->getOperand(++i); AsmNodeOperands.push_back(op); continue; } - unsigned NumRegs = InlineAsm::getNumOperandRegisters(Flag); + const unsigned NumRegs = Flag.getNumOperandRegisters(); if (NumRegs) OpChanged.push_back(false); @@ -212,15 +209,15 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){ bool IsTiedToChangedOp = false; // If it's a use that is tied with a previous def, it has no // reg class constraint. - if (Changed && InlineAsm::isUseOperandTiedToDef(Flag, DefIdx)) + if (Changed && Flag.isUseOperandTiedToDef(DefIdx)) IsTiedToChangedOp = OpChanged[DefIdx]; - if (Kind != InlineAsm::Kind::RegUse && Kind != InlineAsm::Kind::RegDef && - Kind != InlineAsm::Kind::RegDefEarlyClobber) + if (!Flag.isRegUseKind() && !Flag.isRegDefKind() && + !Flag.isRegDefEarlyClobberKind()) continue; unsigned RC; - bool HasRC = InlineAsm::hasRegClassConstraint(Flag, RC); + const bool HasRC = Flag.hasRegClassConstraint(RC); if ((!IsTiedToChangedOp && (!HasRC || RC != SP::IntRegsRegClassID)) || NumRegs != 2) continue; @@ -233,8 +230,7 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){ SDValue PairedReg; MachineRegisterInfo &MRI = MF->getRegInfo(); - if (Kind == InlineAsm::Kind::RegDef || - Kind == InlineAsm::Kind::RegDefEarlyClobber) { + if (Flag.isRegDefKind() || Flag.isRegDefEarlyClobberKind()) { // Replace the two GPRs with 1 GPRPair and copy values from GPRPair to // the original GPRs. 
@@ -296,11 +292,11 @@ bool SparcDAGToDAGISel::tryInlineAsm(SDNode *N){ if(PairedReg.getNode()) { OpChanged[OpChanged.size() -1 ] = true; - Flag = InlineAsm::getFlagWord(Kind, 1 /* RegNum*/); + Flag = InlineAsm::Flag(Flag.getKind(), 1 /* RegNum*/); if (IsTiedToChangedOp) - Flag = InlineAsm::getFlagWordForMatchingOp(Flag, DefIdx); + Flag.setMatchingOp(DefIdx); else - Flag = InlineAsm::getFlagWordForRegClass(Flag, SP::IntPairRegClassID); + Flag.setRegClass(SP::IntPairRegClassID); // Replace the current flag. AsmNodeOperands[AsmNodeOperands.size() -1] = CurDAG->getTargetConstant( Flag, dl, MVT::i32); diff --git a/llvm/lib/Target/X86/X86FloatingPoint.cpp b/llvm/lib/Target/X86/X86FloatingPoint.cpp index db118e74fff0c..9ba624ff30fa5 100644 --- a/llvm/lib/Target/X86/X86FloatingPoint.cpp +++ b/llvm/lib/Target/X86/X86FloatingPoint.cpp @@ -1598,8 +1598,9 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) { for (unsigned i = InlineAsm::MIOp_FirstOperand, e = MI.getNumOperands(); i != e && MI.getOperand(i).isImm(); i += 1 + NumOps) { unsigned Flags = MI.getOperand(i).getImm(); + const InlineAsm::Flag F(Flags); - NumOps = InlineAsm::getNumOperandRegisters(Flags); + NumOps = F.getNumOperandRegisters(); if (NumOps != 1) continue; const MachineOperand &MO = MI.getOperand(i + 1); @@ -1611,12 +1612,12 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &Inst) { // If the flag has a register class constraint, this must be an operand // with constraint "f". Record its index and continue. - if (InlineAsm::hasRegClassConstraint(Flags, RCID)) { + if (F.hasRegClassConstraint(RCID)) { FRegIdx.insert(i + 1); continue; } - switch (InlineAsm::getKind(Flags)) { + switch (F.getKind()) { case InlineAsm::Kind::RegUse: STUses |= (1u << STReg); break;
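To summarize the API change (illustration only, not part of the patch): the removed free functions composed raw flag words, while the new InlineAsm::Flag wrapper owns the encoding and checks invariants in its setters. The two hypothetical helpers below contrast the styles for a single memory-constraint operand; buildMemFlagOld only compiles against the pre-patch header, buildMemFlagNew against the new one.

#include "llvm/IR/InlineAsm.h"
using namespace llvm;

// Before: free functions composing a raw flag word.
unsigned buildMemFlagOld(unsigned ConstraintID) {
  unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind::Mem, /*NumOps=*/1);
  return InlineAsm::getFlagWordForMem(Flag, ConstraintID);
}

// After: the wrapper holds the encoding; the implicit operator uint32_t keeps
// existing addImm()/getTargetConstant() call sites working unchanged.
uint32_t buildMemFlagNew(unsigned ConstraintID) {
  InlineAsm::Flag Flag(InlineAsm::Kind::Mem, /*NumOps=*/1);
  Flag.setMemConstraint(ConstraintID);
  return Flag;
}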