@@ -4625,6 +4625,9 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
     break;
   case ISD::EXTRACT_SUBVECTOR: Res = WidenVecRes_EXTRACT_SUBVECTOR(N); break;
   case ISD::INSERT_VECTOR_ELT: Res = WidenVecRes_INSERT_VECTOR_ELT(N); break;
+  case ISD::ATOMIC_LOAD:
+    Res = WidenVecRes_ATOMIC_LOAD(cast<AtomicSDNode>(N));
+    break;
   case ISD::LOAD: Res = WidenVecRes_LOAD(N); break;
   case ISD::STEP_VECTOR:
   case ISD::SPLAT_VECTOR:
@@ -6014,6 +6017,76 @@ SDValue DAGTypeLegalizer::WidenVecRes_INSERT_VECTOR_ELT(SDNode *N) {
                      N->getOperand(1), N->getOperand(2));
 }
 
+/// Either return the same load or provide appropriate casts
+/// from the load and return that.
+static SDValue coerceLoadedValue(SDValue LdOp, EVT FirstVT, EVT WidenVT,
+                                 TypeSize LdWidth, TypeSize FirstVTWidth,
+                                 SDLoc dl, SelectionDAG &DAG) {
+  assert(TypeSize::isKnownLE(LdWidth, FirstVTWidth));
+  TypeSize WidenWidth = WidenVT.getSizeInBits();
+  if (!FirstVT.isVector()) {
+    unsigned NumElts =
+        WidenWidth.getFixedValue() / FirstVTWidth.getFixedValue();
+    EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), FirstVT, NumElts);
+    SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
+    return DAG.getNode(ISD::BITCAST, dl, WidenVT, VecOp);
+  } else if (FirstVT == WidenVT)
+    return LdOp;
+  // TODO: We don't currently have any tests that exercise this code path.
+  llvm_unreachable("Unimplemented");
+}
+
+static std::optional<EVT> findMemType(SelectionDAG &DAG,
+                                      const TargetLowering &TLI, unsigned Width,
+                                      EVT WidenVT, unsigned Align,
+                                      unsigned WidenEx);
+
+SDValue DAGTypeLegalizer::WidenVecRes_ATOMIC_LOAD(AtomicSDNode *LD) {
+  EVT WidenVT =
+      TLI.getTypeToTransformTo(*DAG.getContext(), LD->getValueType(0));
+  EVT LdVT = LD->getMemoryVT();
+  SDLoc dl(LD);
+  assert(LdVT.isVector() && WidenVT.isVector() && "Expected vectors");
+  assert(LdVT.isScalableVector() == WidenVT.isScalableVector() &&
+         "Must be scalable");
+  assert(LdVT.getVectorElementType() == WidenVT.getVectorElementType() &&
+         "Expected equivalent element types");
+
+  // Load information
+  SDValue Chain = LD->getChain();
+  SDValue BasePtr = LD->getBasePtr();
+  MachineMemOperand::Flags MMOFlags = LD->getMemOperand()->getFlags();
+  AAMDNodes AAInfo = LD->getAAInfo();
+
+  TypeSize LdWidth = LdVT.getSizeInBits();
+  TypeSize WidenWidth = WidenVT.getSizeInBits();
+  TypeSize WidthDiff = WidenWidth - LdWidth;
+
+  // Find the vector type that can load from.
+  std::optional<EVT> FirstVT =
+      findMemType(DAG, TLI, LdWidth.getKnownMinValue(), WidenVT, /*LdAlign=*/0,
+                  WidthDiff.getKnownMinValue());
+
+  if (!FirstVT)
+    return SDValue();
+
+  SmallVector<EVT, 8> MemVTs;
+  TypeSize FirstVTWidth = FirstVT->getSizeInBits();
+
+  SDValue LdOp = DAG.getAtomicLoad(ISD::NON_EXTLOAD, dl, *FirstVT, *FirstVT,
+                                   Chain, BasePtr, LD->getMemOperand());
+
+  // Load the element with one instruction.
+  SDValue Result =
+      coerceLoadedValue(LdOp, *FirstVT, WidenVT, LdWidth, FirstVTWidth, dl,
+                        DAG);
+
+  // Modified the chain - switch anything that used the old chain to use
+  // the new one.
+  ReplaceValueWith(SDValue(LD, 1), LdOp.getValue(1));
+  return Result;
+}
+
 SDValue DAGTypeLegalizer::WidenVecRes_LOAD(SDNode *N) {
   LoadSDNode *LD = cast<LoadSDNode>(N);
   ISD::LoadExtType ExtType = LD->getExtensionType();
@@ -7897,27 +7970,8 @@ SDValue DAGTypeLegalizer::GenWidenVectorLoads(SmallVectorImpl<SDValue> &LdChain,
 
   // Check if we can load the element with one instruction.
   if (MemVTs.empty()) {
-    assert(TypeSize::isKnownLE(LdWidth, FirstVTWidth));
-    if (!FirstVT->isVector()) {
-      unsigned NumElts =
-          WidenWidth.getFixedValue() / FirstVTWidth.getFixedValue();
-      EVT NewVecVT = EVT::getVectorVT(*DAG.getContext(), *FirstVT, NumElts);
-      SDValue VecOp = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, NewVecVT, LdOp);
-      return DAG.getNode(ISD::BITCAST, dl, WidenVT, VecOp);
-    }
-    if (FirstVT == WidenVT)
-      return LdOp;
-
-    // TODO: We don't currently have any tests that exercise this code path.
-    assert(WidenWidth.getFixedValue() % FirstVTWidth.getFixedValue() == 0);
-    unsigned NumConcat =
-        WidenWidth.getFixedValue() / FirstVTWidth.getFixedValue();
-    SmallVector<SDValue, 16> ConcatOps(NumConcat);
-    SDValue UndefVal = DAG.getUNDEF(*FirstVT);
-    ConcatOps[0] = LdOp;
-    for (unsigned i = 1; i != NumConcat; ++i)
-      ConcatOps[i] = UndefVal;
-    return DAG.getNode(ISD::CONCAT_VECTORS, dl, WidenVT, ConcatOps);
+    return coerceLoadedValue(LdOp, *FirstVT, WidenVT, LdWidth, FirstVTWidth,
+                             dl, DAG);
   }
 
   // Load vector by using multiple loads from largest vector to scalar.
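
For readers skimming the patch, here is a minimal, self-contained C++ sketch (not LLVM code; every name in it is invented for illustration) of the coercion coerceLoadedValue performs when the chosen memory type FirstVT is a scalar covering the narrow vector: the atomic vector load is done as one atomic scalar load, whose bits are then reinterpreted as the low lane of the widened vector, with the remaining lanes left unspecified (undef in the DAG; zeroed here only to keep the example deterministic).

#include <array>
#include <atomic>
#include <cstdint>
#include <cstring>
#include <iostream>

// Stand-in for the widened result type (WidenVT), e.g. v4f32.
using WideVec = std::array<float, 4>;

// Mirrors the FirstVT-is-scalar path: one atomic scalar load, then the
// equivalent of SCALAR_TO_VECTOR + BITCAST into the widened vector.
static WideVec widenAtomicLoad(const std::atomic<std::uint32_t> &Mem) {
  std::uint32_t Bits = Mem.load(std::memory_order_acquire); // single atomic load
  WideVec Result{};                             // lanes 1..3 are don't-care here
  std::memcpy(&Result[0], &Bits, sizeof(Bits)); // reinterpret bits as lane 0
  return Result;
}

int main() {
  float F = 3.5f;
  std::uint32_t Bits;
  std::memcpy(&Bits, &F, sizeof(F));
  std::atomic<std::uint32_t> Mem(Bits); // memory holding the narrow vector payload
  WideVec V = widenAtomicLoad(Mem);
  std::cout << V[0] << "\n"; // prints 3.5
}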