@@ -1315,10 +1315,14 @@ class LoopVectorizationCostModel {
   /// Returns true if the target machine can represent \p V as a strided load
   /// or store operation.
-  bool isLegalStridedLoadStore(Value *V, ElementCount VF) {
+  bool isLegalStridedLoadStore(Value *V, ElementCount VF) const {
     if (!isa<LoadInst, StoreInst>(V))
       return false;
     auto *Ty = getLoadStoreType(V);
+    Value *Ptr = getLoadStorePointerOperand(V);
+    // TODO: Support non-unit-reverse strided accesses.
+    if (Legal->isConsecutivePtr(Ty, Ptr) != -1)
+      return false;
     Align Align = getLoadStoreAlignment(V);
     if (VF.isVector())
       Ty = VectorType::get(Ty, VF);
     return TTI.isLegalStridedLoadStore(Ty, Align);
@@ -1659,7 +1663,8 @@ class LoopVectorizationCostModel {
   InstructionCost getUniformMemOpCost(Instruction *I, ElementCount VF);
 
   /// The cost computation for strided load/store instruction.
-  InstructionCost getStridedLoadStoreCost(Instruction *I, ElementCount VF);
+  InstructionCost getStridedLoadStoreCost(Instruction *I,
+                                          ElementCount VF) const;
 
   /// Estimate the overhead of scalarizing an instruction. This is a
   /// convenience wrapper for the type-based getScalarizationOverhead API.
@@ -5831,7 +5836,7 @@ LoopVectorizationCostModel::getInterleaveGroupCost(Instruction *I,
 
 InstructionCost
 LoopVectorizationCostModel::getStridedLoadStoreCost(Instruction *I,
-                                                    ElementCount VF) {
+                                                    ElementCount VF) const {
   Type *ValTy = getLoadStoreType(I);
   auto *VectorTy = cast<VectorType>(toVectorTy(ValTy, VF));
   const Align Alignment = getLoadStoreAlignment(I);
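
For context: the added guard means isLegalStridedLoadStore() now accepts only
reverse-consecutive accesses, i.e. those for which Legal->isConsecutivePtr()
returns -1 (a stride of -1 elements); the TODO covers generalizing this to
other strides. A minimal sketch of a loop whose load would still qualify under
the tightened check (the function below is illustrative, not part of the
patch):

// Illustrative only: Src is read back to front, so its pointer is
// reverse-consecutive (element stride -1), the one shape the tightened
// isLegalStridedLoadStore() still accepts. A forward unit-stride access
// (isConsecutivePtr() == 1) or a non-unit stride (== 0) now returns false.
void reverse_copy(int *Dst, const int *Src, int N) {
  for (int I = 0; I < N; ++I)
    Dst[I] = Src[N - 1 - I];
}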