@@ -653,9 +653,55 @@ RValue CIRGenFunction::buildLoadOfLValue(LValue LV, SourceLocation Loc) {
         getLoc(Loc), load, LV.getVectorIdx()));
   }
 
+  if (LV.isExtVectorElt()) {
+    return buildLoadOfExtVectorElementLValue(LV);
+  }
+
   llvm_unreachable("NYI");
 }
 
+int64_t CIRGenFunction::getAccessedFieldNo(unsigned int idx,
+                                           const mlir::ArrayAttr elts) {
+  auto elt = mlir::dyn_cast<mlir::IntegerAttr>(elts[idx]);
+  assert(elt && "The indices should be integer attributes");
+  return elt.getInt();
+}
+
+// If this is a reference to a subset of the elements of a vector, create an
+// appropriate shufflevector.
+RValue CIRGenFunction::buildLoadOfExtVectorElementLValue(LValue LV) {
+  mlir::Location loc = LV.getExtVectorPointer().getLoc();
+  mlir::Value Vec = builder.createLoad(loc, LV.getExtVectorAddress());
+
+  // HLSL allows treating scalars as one-element vectors. Converting the scalar
+  // IR value to a vector here allows the rest of codegen to behave as normal.
+  if (getLangOpts().HLSL && !mlir::isa<mlir::cir::VectorType>(Vec.getType())) {
+    llvm_unreachable("HLSL NYI");
+  }
+
+  const mlir::ArrayAttr Elts = LV.getExtVectorElts();
+
+  // If the result of the expression is a non-vector type, we must be
+  // extracting a single element. Just codegen as an extractelement.
+  const auto *ExprVT = LV.getType()->getAs<clang::VectorType>();
+  if (!ExprVT) {
+    int64_t InIdx = getAccessedFieldNo(0, Elts);
+    mlir::cir::ConstantOp Elt =
+        builder.getConstInt(loc, builder.getSInt64Ty(), InIdx);
+    return RValue::get(builder.create<mlir::cir::VecExtractOp>(loc, Vec, Elt));
+  }
+
+  // Always use a shuffle vector to try to retain the original program structure.
+  unsigned NumResultElts = ExprVT->getNumElements();
+
+  SmallVector<int64_t, 4> Mask;
+  for (unsigned i = 0; i != NumResultElts; ++i)
+    Mask.push_back(getAccessedFieldNo(i, Elts));
+
+  Vec = builder.createVecShuffle(loc, Vec, Mask);
+  return RValue::get(Vec);
+}
+
 RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
                                                  SourceLocation Loc) {
   const CIRGenBitFieldInfo &info = LV.getBitFieldInfo();
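For context (editorial illustration, not part of the commit): the two branches of buildLoadOfExtVectorElementLValue map directly onto the two shapes a swizzle read takes at the source level. A minimal sketch using Clang's ext_vector_type extension; the typedefs and function names are assumptions of this sketch, not code from the change:

```cpp
// Illustrative inputs only, using Clang's ext_vector_type extension.
typedef float float4 __attribute__((ext_vector_type(4)));
typedef float float2 __attribute__((ext_vector_type(2)));

float one(float4 v) {
  // The result is a non-vector type, so the !ExprVT branch emits a single
  // VecExtractOp at the encoded index {1}.
  return v.y;
}

float2 swap(float4 v) {
  // The result is a 2-element vector; Elts encodes {3, 0}, so the shuffle
  // branch emits one createVecShuffle with mask [3, 0].
  return v.wx;
}
```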
@@ -674,6 +720,80 @@ RValue CIRGenFunction::buildLoadOfBitfieldLValue(LValue LV,
   return RValue::get(field);
 }
 
+void CIRGenFunction::buildStoreThroughExtVectorComponentLValue(RValue Src,
+                                                               LValue Dst) {
+  mlir::Location loc = Dst.getExtVectorPointer().getLoc();
+
+  // HLSL allows storing to scalar values through ExtVector component LValues.
+  // To support this we need to handle the case where the destination address
+  // is a scalar.
+  Address DstAddr = Dst.getExtVectorAddress();
+  if (!mlir::isa<mlir::cir::VectorType>(DstAddr.getElementType())) {
+    llvm_unreachable("HLSL NYI");
+  }
+
+  // This access turns into a read/modify/write of the vector. Load the input
+  // value now.
+  mlir::Value Vec = builder.createLoad(loc, DstAddr);
+  const mlir::ArrayAttr Elts = Dst.getExtVectorElts();
+
+  mlir::Value SrcVal = Src.getScalarVal();
+
+  if (const clang::VectorType *VTy =
+          Dst.getType()->getAs<clang::VectorType>()) {
+    unsigned NumSrcElts = VTy->getNumElements();
+    unsigned NumDstElts = cast<mlir::cir::VectorType>(Vec.getType()).getSize();
+    if (NumDstElts == NumSrcElts) {
+      // Use a shuffle vector if the src and destination have the same number
+      // of elements, and restore the vector mask since it is on the side it
+      // will be stored.
+      SmallVector<int64_t, 4> Mask(NumDstElts);
+      for (unsigned i = 0; i != NumSrcElts; ++i)
+        Mask[getAccessedFieldNo(i, Elts)] = i;
+
+      Vec = builder.createVecShuffle(loc, SrcVal, Mask);
+    } else if (NumDstElts > NumSrcElts) {
+      // Extend the source vector to the same length and then shuffle it
+      // into the destination.
+      // FIXME: since we're shuffling with undef, can we just use the indices
+      // into that? This could be simpler.
+      SmallVector<int64_t, 4> ExtMask;
+      for (unsigned i = 0; i != NumSrcElts; ++i)
+        ExtMask.push_back(i);
+      ExtMask.resize(NumDstElts, -1);
+      mlir::Value ExtSrcVal = builder.createVecShuffle(loc, SrcVal, ExtMask);
+      // Build an identity mask.
+      SmallVector<int64_t, 4> Mask;
+      for (unsigned i = 0; i != NumDstElts; ++i)
+        Mask.push_back(i);
+
+      // When the vector size is odd and .odd or .hi is used, the last element
+      // of the Elts constant array will be one past the size of the vector.
+      // Ignore the last element here, if it is greater than the mask size.
+      if (getAccessedFieldNo(NumSrcElts - 1, Elts) == Mask.size())
+        llvm_unreachable("NYI");
+
+      // Modify the mask so the source elements get shuffled in.
+      for (unsigned i = 0; i != NumSrcElts; ++i)
+        Mask[getAccessedFieldNo(i, Elts)] = i + NumDstElts;
+      Vec = builder.createVecShuffle(loc, Vec, ExtSrcVal, Mask);
+    } else {
+      // We should never shorten the vector.
+      llvm_unreachable("unexpected shorten vector length");
+    }
+  } else {
+    // If the Src is a scalar (not a vector) and the target is a vector, it
+    // must be updating one element.
+    unsigned InIdx = getAccessedFieldNo(0, Elts);
+    auto Elt = builder.getSInt64(InIdx, loc);
+
+    Vec = builder.create<mlir::cir::VecInsertOp>(loc, Vec, SrcVal, Elt);
+  }
+
+  builder.createStore(loc, Vec, Dst.getExtVectorAddress(),
+                      Dst.isVolatileQualified());
+}
+
 void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst,
                                              bool isInit) {
   if (!Dst.isSimple()) {
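Again as editorial illustration, reusing the float4/float2 typedefs assumed in the earlier sketch: each of the three arms of buildStoreThroughExtVectorComponentLValue corresponds to a different shape of swizzle store.

```cpp
// Illustrative only: one assignment per arm of
// buildStoreThroughExtVectorComponentLValue.
void stores(float4 *v, float4 a, float2 b, float s) {
  // NumDstElts == NumSrcElts: Elts encodes {1, 0, 3, 2}, and inverting it
  // yields mask [1, 0, 3, 2], shuffling `a` straight into storage order.
  v->yxwz = a;

  // NumDstElts (4) > NumSrcElts (2): `b` is first widened to four lanes with
  // the -1-padded ExtMask [0, 1, -1, -1], then blended over the loaded vector
  // with mask [4, 5, 2, 3] (source lanes are numbered after the dest lanes).
  v->xy = b;

  // Scalar source: a single VecInsertOp at the encoded index {2}.
  v->z = s;
}
```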
@@ -686,6 +806,10 @@ void CIRGenFunction::buildStoreThroughLValue(RValue Src, LValue Dst,
     builder.createStore(loc, Vector, Dst.getVectorAddress());
     return;
   }
+
+  if (Dst.isExtVectorElt())
+    return buildStoreThroughExtVectorComponentLValue(Src, Dst);
+
   assert(Dst.isBitField() && "NIY LValue type");
   mlir::Value result;
   return buildStoreThroughBitfieldLValue(Src, Dst, result);
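A compound assignment is a useful smoke test here because it exercises both halves of the patch: the read side goes through buildLoadOfExtVectorElementLValue, and the write-back reaches buildStoreThroughExtVectorComponentLValue through the isExtVectorElt() dispatch added above. A sketch under the same assumed typedefs:

```cpp
// Illustrative only: one statement that takes both new code paths.
void bump(float4 *p) {
  // buildLValue emits the ExtVectorElementExpr (the isArrow() case below),
  // the load lowers to a VecExtractOp at index 0, and the store-back goes
  // through buildStoreThroughLValue's new isExtVectorElt() branch, ending
  // in a VecInsertOp.
  p->x += 1.0f;
}
```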
@@ -979,6 +1103,71 @@ CIRGenFunction::buildPointerToDataMemberBinaryExpr(const BinaryOperator *E) {
   return makeAddrLValue(memberAddr, memberPtrTy->getPointeeType(), baseInfo);
 }
 
+LValue
+CIRGenFunction::buildExtVectorElementExpr(const ExtVectorElementExpr *E) {
+  // Emit the base vector as an l-value.
+  LValue Base;
+
+  // ExtVectorElementExpr's base can either be a vector or pointer to vector.
+  if (E->isArrow()) {
+    // If it is a pointer to a vector, emit the address and form an lvalue with
+    // it.
+    LValueBaseInfo BaseInfo;
+    // TODO(cir): Support TBAA
+    assert(!MissingFeatures::tbaa());
+    Address Ptr = buildPointerWithAlignment(E->getBase(), &BaseInfo);
+    const auto *PT = E->getBase()->getType()->castAs<clang::PointerType>();
+    Base = makeAddrLValue(Ptr, PT->getPointeeType(), BaseInfo);
+    Base.getQuals().removeObjCGCAttr();
+  } else if (E->getBase()->isGLValue()) {
+    // Otherwise, if the base is an lvalue (as in the case of foo.x.x), emit
+    // the base as an lvalue.
+    assert(E->getBase()->getType()->isVectorType());
+    Base = buildLValue(E->getBase());
+  } else {
+    // Otherwise, the base is a normal rvalue (as in (V+V).x), so emit it as such.
+    assert(E->getBase()->getType()->isVectorType() &&
+           "Result must be a vector");
+    mlir::Value Vec = buildScalarExpr(E->getBase());
+
+    // Store the vector to memory (because LValue wants an address).
+    QualType BaseTy = E->getBase()->getType();
+    Address VecMem = CreateMemTemp(BaseTy, Vec.getLoc(), "tmp");
+    builder.createStore(Vec.getLoc(), Vec, VecMem);
+    Base = makeAddrLValue(VecMem, BaseTy, AlignmentSource::Decl);
+  }
+
+  QualType type =
+      E->getType().withCVRQualifiers(Base.getQuals().getCVRQualifiers());
+
+  // Encode the element access list into a vector of unsigned indices.
+  SmallVector<uint32_t, 4> indices;
+  E->getEncodedElementAccess(indices);
+
+  if (Base.isSimple()) {
+    SmallVector<int64_t, 4> attrElts;
+    for (uint32_t i : indices) {
+      attrElts.push_back(static_cast<int64_t>(i));
+    }
+    auto elts = builder.getI64ArrayAttr(attrElts);
+    return LValue::MakeExtVectorElt(Base.getAddress(), elts, type,
+                                    Base.getBaseInfo());
+  }
+  assert(Base.isExtVectorElt() && "Can only subscript lvalue vec elts here!");
+
+  mlir::ArrayAttr baseElts = Base.getExtVectorElts();
+
+  // Composite the two indices.
+  SmallVector<int64_t, 4> attrElts;
+  for (uint32_t i : indices) {
+    attrElts.push_back(getAccessedFieldNo(i, baseElts));
+  }
+  auto elts = builder.getI64ArrayAttr(attrElts);
+
+  return LValue::MakeExtVectorElt(Base.getExtVectorAddress(), elts, type,
+                                  Base.getBaseInfo());
+}
+
 LValue CIRGenFunction::buildBinaryOperatorLValue(const BinaryOperator *E) {
   // Comma expressions just emit their LHS then their RHS as an l-value.
   if (E->getOpcode() == BO_Comma) {
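One note on the non-isSimple() branch, again as editorial illustration under the same assumed typedefs: when the base is itself an ext-vector element lvalue, the two access lists are composed through getAccessedFieldNo instead of materializing an intermediate vector, so a nested swizzle still yields a single lvalue over the original storage.

```cpp
// Illustrative only: nested swizzles compose their index lists.
float2 headOfReverse(float4 v) {
  // The inner v.zyx produces an ext-vector lvalue with baseElts = {2, 1, 0};
  // the outer .xy has encoded indices {0, 1}, so the composed attribute is
  // {baseElts[0], baseElts[1]} = {2, 1} -- one lvalue over v, no temporary.
  return v.zyx.xy;
}
```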
@@ -2263,6 +2452,8 @@ LValue CIRGenFunction::buildLValue(const Expr *E) {
     return buildConditionalOperatorLValue(cast<ConditionalOperator>(E));
   case Expr::ArraySubscriptExprClass:
     return buildArraySubscriptExpr(cast<ArraySubscriptExpr>(E));
+  case Expr::ExtVectorElementExprClass:
+    return buildExtVectorElementExpr(cast<ExtVectorElementExpr>(E));
   case Expr::BinaryOperatorClass:
     return buildBinaryOperatorLValue(cast<BinaryOperator>(E));
   case Expr::CompoundAssignOperatorClass: {