[clang][RISCV] Enable struct of homogeneous scalable vector as function argument #78550
Conversation
…on argument
Currently llvm supports struct as a function input, so the RISCV tuple type can just use a struct of homogeneous scalable vectors instead of flattening them.
@llvm/pr-subscribers-backend-risc-v @llvm/pr-subscribers-clang
Author: Brandon Wu (4vtomat)
Changes: Currently llvm supports struct as a function input, so the RISCV tuple …
Patch is 134.96 MiB, truncated to 20.00 KiB below, full version: https://github.com/llvm/llvm-project/pull/78550.diff
510 Files Affected:
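For a quick sense of the effect, here is a minimal RVV C example together with the function signatures it lowers to before and after this patch. The source and the IR signatures are taken from the vget test update in the diff below; the compile command is only an assumed illustration, not part of this PR.

```c
// Minimal illustration; a plausible compile command (assumption, not from
// the PR) would be:
//   clang --target=riscv64 -march=rv64gcv_zvfh -O2 -emit-llvm -S tuple.c
#include <stddef.h>
#include <riscv_vector.h>

vfloat16mf4_t test_vget_v_f16mf4x2_f16mf4(vfloat16mf4x2_t src, size_t index) {
  return __riscv_vget_v_f16mf4x2_f16mf4(src, 0);
}

// Before this patch, the tuple argument was flattened into separate
// scalable-vector parameters:
//   define <vscale x 1 x half> @test_vget_v_f16mf4x2_f16mf4(
//       <vscale x 1 x half> %src.coerce0, <vscale x 1 x half> %src.coerce1,
//       i64 noundef %index)
//
// With this patch, it is passed as a single struct of homogeneous scalable
// vectors:
//   define <vscale x 1 x half> @test_vget_v_f16mf4x2_f16mf4(
//       { <vscale x 1 x half>, <vscale x 1 x half> } %src,
//       i64 noundef %index)
```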
diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index acf6cbad1c74809..68dbd3fa6acdff5 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -1531,7 +1531,8 @@ void ClangToLLVMArgMapping::construct(const ASTContext &Context,
case ABIArgInfo::Direct: {
// FIXME: handle sseregparm someday...
llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
- if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
+ if (AI.isDirect() && AI.getCanBeFlattened() && STy &&
+ !STy->containsHomogeneousScalableVectorTypes()) {
IRArgs.NumberOfArgs = STy->getNumElements();
} else {
IRArgs.NumberOfArgs = 1;
@@ -1713,7 +1714,8 @@ CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
// FCAs, so we flatten them if this is safe to do for this argument.
llvm::Type *argType = ArgInfo.getCoerceToType();
llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
- if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
+ if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened() &&
+ !st->containsHomogeneousScalableVectorTypes()) {
assert(NumIRArgs == st->getNumElements());
for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
ArgTypes[FirstIRArg + i] = st->getElementType(i);
@@ -3206,6 +3208,25 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
}
}
+ llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
+ llvm::TypeSize StructSize;
+ llvm::TypeSize PtrElementSize;
+ if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
+ STy->getNumElements() > 1) {
+ StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
+ PtrElementSize =
+ CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(Ty));
+ if (STy->containsHomogeneousScalableVectorTypes()) {
+ assert(StructSize == PtrElementSize &&
+ "Only allow non-fractional movement of structure with"
+ "homogeneous scalable vector type");
+
+ ArgVals.push_back(ParamValue::forDirect(AI));
+ break;
+ }
+ }
+
Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
Arg->getName());
@@ -3214,53 +3235,29 @@ void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
- llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
STy->getNumElements() > 1) {
- llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
- llvm::TypeSize PtrElementSize =
- CGM.getDataLayout().getTypeAllocSize(Ptr.getElementType());
- if (StructSize.isScalable()) {
- assert(STy->containsHomogeneousScalableVectorTypes() &&
- "ABI only supports structure with homogeneous scalable vector "
- "type");
- assert(StructSize == PtrElementSize &&
- "Only allow non-fractional movement of structure with"
- "homogeneous scalable vector type");
- assert(STy->getNumElements() == NumIRArgs);
-
- llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto *AI = Fn->getArg(FirstIRArg + i);
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- LoadedStructValue =
- Builder.CreateInsertValue(LoadedStructValue, AI, i);
- }
+ uint64_t SrcSize = StructSize.getFixedValue();
+ uint64_t DstSize = PtrElementSize.getFixedValue();
- Builder.CreateStore(LoadedStructValue, Ptr);
+ Address AddrToStoreInto = Address::invalid();
+ if (SrcSize <= DstSize) {
+ AddrToStoreInto = Ptr.withElementType(STy);
} else {
- uint64_t SrcSize = StructSize.getFixedValue();
- uint64_t DstSize = PtrElementSize.getFixedValue();
-
- Address AddrToStoreInto = Address::invalid();
- if (SrcSize <= DstSize) {
- AddrToStoreInto = Ptr.withElementType(STy);
- } else {
- AddrToStoreInto =
- CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
- }
+ AddrToStoreInto =
+ CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
+ }
- assert(STy->getNumElements() == NumIRArgs);
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- auto AI = Fn->getArg(FirstIRArg + i);
- AI->setName(Arg->getName() + ".coerce" + Twine(i));
- Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
- Builder.CreateStore(AI, EltPtr);
- }
+ assert(STy->getNumElements() == NumIRArgs);
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ auto AI = Fn->getArg(FirstIRArg + i);
+ AI->setName(Arg->getName() + ".coerce" + Twine(i));
+ Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
+ Builder.CreateStore(AI, EltPtr);
+ }
- if (SrcSize > DstSize) {
- Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
- }
+ if (SrcSize > DstSize) {
+ Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
}
} else {
// Simple case, just do a coerced store of the argument into the alloca.
@@ -5277,6 +5274,24 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
break;
}
+ llvm::StructType *STy =
+ dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
+ llvm::Type *SrcTy = ConvertTypeForMem(I->Ty);
+ llvm::TypeSize SrcTypeSize;
+ llvm::TypeSize DstTypeSize;
+ if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
+ SrcTypeSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
+ DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy);
+ if (STy->containsHomogeneousScalableVectorTypes()) {
+ assert(SrcTypeSize == DstTypeSize &&
+ "Only allow non-fractional movement of structure with "
+ "homogeneous scalable vector type");
+
+ IRCallArgs[FirstIRArg] = I->getKnownRValue().getScalarVal();
+ break;
+ }
+ }
+
// FIXME: Avoid the conversion through memory if possible.
Address Src = Address::invalid();
if (!I->isAggregate()) {
@@ -5292,54 +5307,30 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
// Fast-isel and the optimizer generally like scalar values better than
// FCAs, so we flatten them if this is safe to do for this argument.
- llvm::StructType *STy =
- dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
- llvm::Type *SrcTy = Src.getElementType();
- llvm::TypeSize SrcTypeSize =
- CGM.getDataLayout().getTypeAllocSize(SrcTy);
- llvm::TypeSize DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy);
- if (SrcTypeSize.isScalable()) {
- assert(STy->containsHomogeneousScalableVectorTypes() &&
- "ABI only supports structure with homogeneous scalable vector "
- "type");
- assert(SrcTypeSize == DstTypeSize &&
- "Only allow non-fractional movement of structure with "
- "homogeneous scalable vector type");
- assert(NumIRArgs == STy->getNumElements());
-
- llvm::Value *StoredStructValue =
- Builder.CreateLoad(Src, Src.getName() + ".tuple");
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- llvm::Value *Extract = Builder.CreateExtractValue(
- StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
- IRCallArgs[FirstIRArg + i] = Extract;
- }
+ uint64_t SrcSize = SrcTypeSize.getFixedValue();
+ uint64_t DstSize = DstTypeSize.getFixedValue();
+
+ // If the source type is smaller than the destination type of the
+ // coerce-to logic, copy the source value into a temp alloca the size
+ // of the destination type to allow loading all of it. The bits past
+ // the source value are left undef.
+ if (SrcSize < DstSize) {
+ Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
+ Src.getName() + ".coerce");
+ Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
+ Src = TempAlloca;
} else {
- uint64_t SrcSize = SrcTypeSize.getFixedValue();
- uint64_t DstSize = DstTypeSize.getFixedValue();
-
- // If the source type is smaller than the destination type of the
- // coerce-to logic, copy the source value into a temp alloca the size
- // of the destination type to allow loading all of it. The bits past
- // the source value are left undef.
- if (SrcSize < DstSize) {
- Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
- Src.getName() + ".coerce");
- Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
- Src = TempAlloca;
- } else {
- Src = Src.withElementType(STy);
- }
+ Src = Src.withElementType(STy);
+ }
- assert(NumIRArgs == STy->getNumElements());
- for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
- Address EltPtr = Builder.CreateStructGEP(Src, i);
- llvm::Value *LI = Builder.CreateLoad(EltPtr);
- if (ArgHasMaybeUndefAttr)
- LI = Builder.CreateFreeze(LI);
- IRCallArgs[FirstIRArg + i] = LI;
- }
+ assert(NumIRArgs == STy->getNumElements());
+ for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
+ Address EltPtr = Builder.CreateStructGEP(Src, i);
+ llvm::Value *LI = Builder.CreateLoad(EltPtr);
+ if (ArgHasMaybeUndefAttr)
+ LI = Builder.CreateFreeze(LI);
+ IRCallArgs[FirstIRArg + i] = LI;
}
} else {
// In the simple case, just pass the coerced loaded value.
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c
index 1f790fe38065ae5..a324cb72b67ccf6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vget.c
@@ -668,3291 +668,2260 @@ vuint64m4_t test_vget_v_u64m8_u64m4(vuint64m8_t src, size_t index) {
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x2_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ({ <vscale x 1 x half>, <vscale x 1 x half> } [[SRC:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[SRC_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[SRC_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], 0
-// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP2]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half> } [[SRC]], 0
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vget_v_f16mf4x2_f16mf4(vfloat16mf4x2_t src, size_t index) {
return __riscv_vget_v_f16mf4x2_f16mf4(src, 0);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x3_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], <vscale x 1 x half> [[SRC_COERCE2:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ({ <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[SRC_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[SRC_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[SRC_COERCE2]], 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], 0
-// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP3]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC]], 0
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vget_v_f16mf4x3_f16mf4(vfloat16mf4x3_t src, size_t index) {
return __riscv_vget_v_f16mf4x3_f16mf4(src, 0);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x4_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], <vscale x 1 x half> [[SRC_COERCE2:%.*]], <vscale x 1 x half> [[SRC_COERCE3:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ({ <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[SRC_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[SRC_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[SRC_COERCE2]], 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[SRC_COERCE3]], 3
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], 0
-// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP4]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC]], 0
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vget_v_f16mf4x4_f16mf4(vfloat16mf4x4_t src, size_t index) {
return __riscv_vget_v_f16mf4x4_f16mf4(src, 0);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x5_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], <vscale x 1 x half> [[SRC_COERCE2:%.*]], <vscale x 1 x half> [[SRC_COERCE3:%.*]], <vscale x 1 x half> [[SRC_COERCE4:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ({ <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[SRC_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[SRC_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[SRC_COERCE2]], 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[SRC_COERCE3]], 3
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[SRC_COERCE4]], 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], 0
-// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP5]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC]], 0
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vget_v_f16mf4x5_f16mf4(vfloat16mf4x5_t src, size_t index) {
return __riscv_vget_v_f16mf4x5_f16mf4(src, 0);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vget_v_f16mf4x6_f16mf4
-// CHECK-RV64-SAME: (<vscale x 1 x half> [[SRC_COERCE0:%.*]], <vscale x 1 x half> [[SRC_COERCE1:%.*]], <vscale x 1 x half> [[SRC_COERCE2:%.*]], <vscale x 1 x half> [[SRC_COERCE3:%.*]], <vscale x 1 x half> [[SRC_COERCE4:%.*]], <vscale x 1 x half> [[SRC_COERCE5:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-SAME: ({ <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC:%.*]], i64 noundef [[INDEX:%.*]]) #[[ATTR0]] {
// CHECK-RV64-NEXT: entry:
-// CHECK-RV64-NEXT: [[TMP0:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } poison, <vscale x 1 x half> [[SRC_COERCE0]], 0
-// CHECK-RV64-NEXT: [[TMP1:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP0]], <vscale x 1 x half> [[SRC_COERCE1]], 1
-// CHECK-RV64-NEXT: [[TMP2:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP1]], <vscale x 1 x half> [[SRC_COERCE2]], 2
-// CHECK-RV64-NEXT: [[TMP3:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP2]], <vscale x 1 x half> [[SRC_COERCE3]], 3
-// CHECK-RV64-NEXT: [[TMP4:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP3]], <vscale x 1 x half> [[SRC_COERCE4]], 4
-// CHECK-RV64-NEXT: [[TMP5:%.*]] = insertvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP4]], <vscale x 1 x half> [[SRC_COERCE5]], 5
-// CHECK-RV64-NEXT: [[TMP6:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[TMP5]], 0
-// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP6]]
+// CHECK-RV64-NEXT: [[TMP0:%.*]] = extractvalue { <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half>, <vscale x 1 x half> } [[SRC]], 0
+// CHECK-RV64-NEXT: ret <vscale x 1 x half> [[TMP0]]
//
vfloat16mf4_t test_vget_v_f16mf4x6_f16mf4(vfloat16mf4x6_t src, size_t index) {
return __riscv_vget_v_f16mf4x6_f16mf4(src, 0);
}
// CHECK-RV64-LABEL: define dso_local <vscale x 1 x...
[truncated]
… function argument: force-pushed from e4d16bb to 0d123d9
✅ With the latest revision this PR passed the C/C++ code formatter.
This commit handles vector arguments/returns for function definitions/calls; a new class, RVVArgDispatcher, is added to do all vector register assignment, including mask types, data types, and tuple types. It precomputes the register number for each argument as per https://github.com/riscv-non-isa/riscv-elf-psabi-doc/blob/master/riscv-cc.adoc#standard-vector-calling-convention-variant, and the result is passed to the calling-convention function to handle all vector arguments. Depends on: llvm#78550
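For context, a toy sketch of the register-assignment rule that commit message refers to, under the assumptions that vector arguments are drawn from v8–v23, a value of LMUL k occupies a group of k registers whose first register number is a multiple of k, and a tuple of NF fields occupies NF consecutive such groups. This is not the actual RVVArgDispatcher implementation, and mask arguments and indirect passing are glossed over.

```c
#include <stdio.h>

#define FIRST_VREG 8
#define LAST_VREG 23

/* Assign a vector argument its register group(s): `lmul` registers per
 * group, aligned to `lmul`, and `nf` consecutive groups for a tuple.
 * Returns the first register number, or -1 if it no longer fits. */
static int assign_vreg(int *next, int lmul, int nf) {
  int start = (*next + lmul - 1) / lmul * lmul; /* align group to LMUL */
  if (start + lmul * nf - 1 > LAST_VREG)
    return -1; /* would be passed indirectly instead */
  *next = start + lmul * nf;
  return start;
}

int main(void) {
  int next = FIRST_VREG;

  /* vfloat16mf4x2_t: two fields, each taking one register (a fractional
   * LMUL still occupies a whole register), so v8 and v9. */
  int tuple = assign_vreg(&next, 1, 2);
  printf("f16mf4x2 tuple -> v%d..v%d\n", tuple, next - 1);

  /* vint32m4_t: one group of four registers aligned to a multiple of 4,
   * so v12..v15 (v10 and v11 are skipped for alignment). */
  int m4 = assign_vreg(&next, 4, 1);
  printf("i32m4 vector   -> v%d..v%d\n", m4, next - 1);
  return 0;
}
```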
LGTM
…on argument (llvm#78550)
llvm IR supports struct as a function input, so the RISCV tuple type can just use a struct of homogeneous scalable vectors instead of flattening them.