diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
index 3108ce9837891..bd602635dd5f7 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.cpp
@@ -423,18 +423,84 @@ bool RISCVCallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
   return true;
 }
 
+static const MCPhysReg ArgGPRs[] = {RISCV::X10, RISCV::X11, RISCV::X12,
+                                    RISCV::X13, RISCV::X14, RISCV::X15,
+                                    RISCV::X16, RISCV::X17};
+
+/// If there are varargs that were passed in a0-a7, the data in those registers
+/// must be copied to the varargs save area on the stack.
+void RISCVCallLowering::saveVarArgRegisters(
+    MachineIRBuilder &MIRBuilder, CallLowering::IncomingValueHandler &Handler,
+    IncomingValueAssigner &Assigner, CCState &CCInfo) const {
+  MachineFunction &MF = MIRBuilder.getMF();
+  const RISCVSubtarget &Subtarget = MF.getSubtarget<RISCVSubtarget>();
+  unsigned XLenInBytes = Subtarget.getXLen() / 8;
+  ArrayRef<MCPhysReg> ArgRegs(ArgGPRs);
+  unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
+
+  // Offset of the first variable argument from the stack pointer, and size of
+  // the vararg save area. For now, the varargs save area is either zero or
+  // large enough to hold a0-a7.
+  int VaArgOffset, VarArgsSaveSize;
+  // If all registers are allocated, then all varargs must be passed on the
+  // stack and we don't need to save any argregs.
+  if (ArgRegs.size() == Idx) {
+    VaArgOffset = Assigner.StackSize;
+    VarArgsSaveSize = 0;
+  } else {
+    VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
+    VaArgOffset = -VarArgsSaveSize;
+  }
+
+  // Record the frame index of the first variable argument; it is needed to
+  // lower G_VASTART.
+  MachineFrameInfo &MFI = MF.getFrameInfo();
+  int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+  RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
+  RVFI->setVarArgsFrameIndex(FI);
+
+  // If saving an odd number of registers, create an extra stack slot to
+  // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
+  // that offsets to even-numbered registers remain 2*XLEN-aligned.
+  if (Idx % 2) {
+    MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
+    VarArgsSaveSize += XLenInBytes;
+  }
+  RVFI->setVarArgsSaveSize(VarArgsSaveSize);
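+
+  // Worked example (illustrative, matching the @va3arg test below): on RV32,
+  // `void f(ptr, ptr, ptr, ...)` uses a0-a2 for its fixed arguments, so
+  // Idx == 3 and a3-a7 must be saved. VarArgsSaveSize = 4 * 5 = 20 and
+  // VaArgOffset = -20; since Idx is odd, the extra slot created above grows
+  // the save area to 24 bytes.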
+
+  // Copy the integer registers that may have been used for passing varargs
+  // to the vararg save area.
+  const LLT p0 = LLT::pointer(MF.getDataLayout().getAllocaAddrSpace(),
+                              Subtarget.getXLen());
+  const LLT sXLen = LLT::scalar(Subtarget.getXLen());
+  const MVT XLenVT = Subtarget.getXLenVT();
+  MachineRegisterInfo &MRI = MF.getRegInfo();
+  for (unsigned I = Idx; I < ArgRegs.size(); ++I, VaArgOffset += XLenInBytes) {
+    const Register VReg = MRI.createGenericVirtualRegister(sXLen);
+    Handler.assignValueToReg(
+        VReg, ArgRegs[I],
+        CCValAssign::getReg(I + MF.getFunction().getNumOperands(), XLenVT,
+                            ArgRegs[I], XLenVT, CCValAssign::Full));
+    FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
+    auto FIN = MIRBuilder.buildFrameIndex(p0, FI);
+    auto MPO = MachinePointerInfo::getFixedStack(MF, FI);
+    auto Store =
+        MIRBuilder.buildStore(VReg, FIN, MPO, inferAlignFromPtrInfo(MF, MPO));
+    // This was taken from SelectionDAG, but we are not sure why it exists.
+    // It is being investigated in github.com/llvm/llvm-project/issues/73735.
+    Store->memoperands()[0]->setValue((Value *)nullptr);
+  }
+}
+
 bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                              const Function &F,
                                              ArrayRef<ArrayRef<Register>> VRegs,
                                              FunctionLoweringInfo &FLI) const {
-  // Early exit if there are no arguments.
-  if (F.arg_empty())
+  // Early exit if there are no arguments. Varargs are not part of F.args()
+  // but must still be lowered.
+  if (F.arg_empty() && !F.isVarArg())
     return true;
 
-  // TODO: Support vararg functions.
-  if (F.isVarArg())
-    return false;
-
   const RISCVSubtarget &Subtarget =
       MIRBuilder.getMF().getSubtarget<RISCVSubtarget>();
   for (auto &Arg : F.args()) {
@@ -467,8 +533,16 @@ bool RISCVCallLowering::lowerFormalArguments(MachineIRBuilder &MIRBuilder,
                                /*IsRet=*/false);
   RISCVFormalArgHandler Handler(MIRBuilder, MF.getRegInfo());
 
-  return determineAndHandleAssignments(Handler, Assigner, SplitArgInfos,
-                                       MIRBuilder, CC, F.isVarArg());
+  SmallVector<CCValAssign, 16> ArgLocs;
+  CCState CCInfo(CC, F.isVarArg(), MIRBuilder.getMF(), ArgLocs, F.getContext());
+  if (!determineAssignments(Assigner, SplitArgInfos, CCInfo) ||
+      !handleAssignments(Handler, SplitArgInfos, CCInfo, ArgLocs, MIRBuilder))
+    return false;
+
+  if (F.isVarArg())
+    saveVarArgRegisters(MIRBuilder, Handler, Assigner, CCInfo);
+
+  return true;
 }
 
 bool RISCVCallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
index d80a666f34894..abe704b4a6451 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVCallLowering.h
@@ -42,6 +42,11 @@ class RISCVCallLowering : public CallLowering {
 private:
   bool lowerReturnVal(MachineIRBuilder &MIRBuilder, const Value *Val,
                       ArrayRef<Register> VRegs, MachineInstrBuilder &Ret) const;
+
+  void saveVarArgRegisters(MachineIRBuilder &MIRBuilder,
+                           CallLowering::IncomingValueHandler &Handler,
+                           IncomingValueAssigner &Assigner,
+                           CCState &CCInfo) const;
 };
 
 } // end namespace llvm
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
new file mode 100644
index 0000000000000..ecfccc48bb34f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll
@@ -0,0 +1,365 @@
+; NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py UTC_ARGS: --version 4
+; RUN: llc -mtriple=riscv32 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV32 %s
+; RUN: llc -mtriple=riscv64 -global-isel -stop-after=irtranslator -verify-machineinstrs < %s \
+; RUN:   | FileCheck -check-prefixes=RV64 %s
+
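+; Each function below fixes one more named pointer argument, so one fewer
+; register needs saving; e.g. @va2arg corresponds to the C declaration
+;   void va2arg(void *a, void *b, ...);
+; With N named arguments, a0..a(N-1) carry the named arguments and the
+; remaining registers of a0-a7 are spilled to the fixed stack objects
+; (%fixed-stack.*) that make up the vararg save area.
+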
+define void @va1arg(ptr %a, ...) {
+  ; RV32-LABEL: name: va1arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(s32) = COPY $x11
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV32-NEXT:   G_STORE [[COPY1]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX3]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX4]](p0) :: (store (s32))
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX5]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX6]](p0) :: (store (s32))
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va1arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(s64) = COPY $x11
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.6
+  ; RV64-NEXT:   G_STORE [[COPY1]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX3]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX4]](p0) :: (store (s64))
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX5]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX6:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX6]](p0) :: (store (s64))
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va2arg(ptr %a, ptr %b, ...) {
+  ; RV32-LABEL: name: va2arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(s32) = COPY $x12
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV32-NEXT:   G_STORE [[COPY2]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX2]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX3]](p0) :: (store (s32))
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX4]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX5]](p0) :: (store (s32))
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va2arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(s64) = COPY $x12
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.5
+  ; RV64-NEXT:   G_STORE [[COPY2]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX3]](p0) :: (store (s64))
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX4]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX5:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX5]](p0) :: (store (s64))
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va3arg(ptr %a, ptr %b, ptr %c, ...) {
+  ; RV32-LABEL: name: va3arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(s32) = COPY $x13
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV32-NEXT:   G_STORE [[COPY3]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX3]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX4]](p0) :: (store (s32))
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va3arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(s64) = COPY $x13
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.4
+  ; RV64-NEXT:   G_STORE [[COPY3]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX3]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX4:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX4]](p0) :: (store (s64))
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va4arg(ptr %a, ptr %b, ptr %c, ptr %d, ...) {
+  ; RV32-LABEL: name: va4arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(s32) = COPY $x14
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV32-NEXT:   G_STORE [[COPY4]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 16)
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX2]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX3]](p0) :: (store (s32))
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va4arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(s64) = COPY $x14
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.3
+  ; RV64-NEXT:   G_STORE [[COPY4]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX2]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX3:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX3]](p0) :: (store (s64))
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va5arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ...) {
+  ; RV32-LABEL: name: va5arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(s32) = COPY $x15
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV32-NEXT:   G_STORE [[COPY5]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX1]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX2]](p0) :: (store (s32))
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va5arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(s64) = COPY $x15
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.2
+  ; RV64-NEXT:   G_STORE [[COPY5]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX1]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX2:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX2]](p0) :: (store (s64))
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va6arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ...) {
+  ; RV32-LABEL: name: va6arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(s32) = COPY $x16
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV32-NEXT:   G_STORE [[COPY6]](s32), [[FRAME_INDEX]](p0) :: (store (s32), align 8)
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX1]](p0) :: (store (s32))
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va6arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(s64) = COPY $x16
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.1
+  ; RV64-NEXT:   G_STORE [[COPY6]](s64), [[FRAME_INDEX]](p0) :: (store (s64), align 16)
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX1:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX1]](p0) :: (store (s64))
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va7arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ...) {
+  ; RV32-LABEL: name: va7arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(s32) = COPY $x17
+  ; RV32-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV32-NEXT:   G_STORE [[COPY7]](s32), [[FRAME_INDEX]](p0) :: (store (s32))
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va7arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(s64) = COPY $x17
+  ; RV64-NEXT:   [[FRAME_INDEX:%[0-9]+]]:_(p0) = G_FRAME_INDEX %fixed-stack.0
+  ; RV64-NEXT:   G_STORE [[COPY7]](s64), [[FRAME_INDEX]](p0) :: (store (s64))
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
+
+define void @va8arg(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e, ptr %f, ptr %g, ptr %h, ...) {
+  ; RV32-LABEL: name: va8arg
+  ; RV32: bb.1 (%ir-block.0):
+  ; RV32-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV32-NEXT: {{  $}}
+  ; RV32-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV32-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV32-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV32-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV32-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV32-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV32-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV32-NEXT:   [[COPY7:%[0-9]+]]:_(p0) = COPY $x17
+  ; RV32-NEXT:   PseudoRET
+  ;
+  ; RV64-LABEL: name: va8arg
+  ; RV64: bb.1 (%ir-block.0):
+  ; RV64-NEXT:   liveins: $x10, $x11, $x12, $x13, $x14, $x15, $x16, $x17
+  ; RV64-NEXT: {{  $}}
+  ; RV64-NEXT:   [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+  ; RV64-NEXT:   [[COPY1:%[0-9]+]]:_(p0) = COPY $x11
+  ; RV64-NEXT:   [[COPY2:%[0-9]+]]:_(p0) = COPY $x12
+  ; RV64-NEXT:   [[COPY3:%[0-9]+]]:_(p0) = COPY $x13
+  ; RV64-NEXT:   [[COPY4:%[0-9]+]]:_(p0) = COPY $x14
+  ; RV64-NEXT:   [[COPY5:%[0-9]+]]:_(p0) = COPY $x15
+  ; RV64-NEXT:   [[COPY6:%[0-9]+]]:_(p0) = COPY $x16
+  ; RV64-NEXT:   [[COPY7:%[0-9]+]]:_(p0) = COPY $x17
+  ; RV64-NEXT:   PseudoRET
+  ret void
+}
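+
+; The assertions above can be regenerated after changing the lowering with a
+; command like the following (a sketch; the --llc-binary path is an assumption
+; about your build layout):
+;   llvm/utils/update_mir_test_checks.py --llc-binary=build/bin/llc \
+;     llvm/test/CodeGen/RISCV/GlobalISel/irtranslator/lower-args-vararg.ll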