@@ -1315,10 +1315,14 @@ class LoopVectorizationCostModel {
 
   /// Returns true if the target machine can represent \p V as a strided load
   /// or store operation.
-  bool isLegalStridedLoadStore(Value *V, ElementCount VF) {
+  bool isLegalStridedLoadStore(Value *V, ElementCount VF) const {
     if (!isa<LoadInst, StoreInst>(V))
       return false;
     auto *Ty = getLoadStoreType(V);
+    Value *Ptr = getLoadStorePointerOperand(V);
+    // TODO: Support non-unit-reverse strided accesses.
+    if (Legal->isConsecutivePtr(Ty, Ptr) != -1)
+      return false;
     Align Align = getLoadStoreAlignment(V);
     if (VF.isVector())
       Ty = VectorType::get(Ty, VF);
@@ -1659,7 +1663,8 @@ class LoopVectorizationCostModel {
   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
 
   /// The cost computation for strided load/store instruction.
-  InstructionCost getStridedLoadStoreCost(Instruction *I, ElementCount VF);
+  InstructionCost getStridedLoadStoreCost(Instruction *I,
+                                          ElementCount VF) const;
 
   /// Estimate the overhead of scalarizing an instruction. This is a
   /// convenience wrapper for the type-based getScalarizationOverhead API.
@@ -5831,7 +5836,7 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
 
 InstructionCost
 LoopVectorizationCostModel::getStridedLoadStoreCost(Instruction *I,
-                                                    ElementCount VF) {
+                                                    ElementCount VF) const {
   Type *ValTy = getLoadStoreType(I);
   auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
0 commit comments