-
Notifications
You must be signed in to change notification settings - Fork 15.3k
[LV] Convert uniform-address unmasked scatters to scalar store. #166114
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
base: main
Are you sure you want to change the base?
Changes from 2 commits
e57c745
8f59bef
9ac48e0
2ee05b9
91f9eaf
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change | ||||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
|
|
@@ -1400,14 +1400,47 @@ static void narrowToSingleScalarRecipes(VPlan &Plan) { | |||||||||||||||
| for (VPBasicBlock *VPBB : VPBlockUtils::blocksOnly<VPBasicBlock>( | ||||||||||||||||
| vp_depth_first_shallow(Plan.getVectorLoopRegion()->getEntry()))) { | ||||||||||||||||
| for (VPRecipeBase &R : make_early_inc_range(reverse(*VPBB))) { | ||||||||||||||||
| if (!isa<VPWidenRecipe, VPWidenSelectRecipe, VPReplicateRecipe>(&R)) | ||||||||||||||||
| if (!isa<VPWidenRecipe, VPWidenSelectRecipe, VPReplicateRecipe, | ||||||||||||||||
| VPWidenMemoryRecipe>(&R)) | ||||||||||||||||
| continue; | ||||||||||||||||
| auto *RepR = dyn_cast<VPReplicateRecipe>(&R); | ||||||||||||||||
| if (RepR && (RepR->isSingleScalar() || RepR->isPredicated())) | ||||||||||||||||
| continue; | ||||||||||||||||
|
|
||||||||||||||||
| auto *RepOrWidenR = cast<VPSingleDefRecipe>(&R); | ||||||||||||||||
| if (RepR && isa<StoreInst>(RepR->getUnderlyingInstr()) && | ||||||||||||||||
| // Convert scatters with a uniform address that is unmasked into an | ||||||||||||||||
| // extract-last-element + scalar store. | ||||||||||||||||
| // TODO: Add a profitability check comparing the cost of a scatter vs. | ||||||||||||||||
| // extract + scalar store. | ||||||||||||||||
| auto *WidenStoreR = dyn_cast<VPWidenMemoryRecipe>(&R); | ||||||||||||||||
| if (WidenStoreR && vputils::isSingleScalar(WidenStoreR->getAddr()) && | ||||||||||||||||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is the reason that we create a scatter initially, but can simplify it here due to some other VPlan transformation?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. In the widening decision, the store can be narrowed to a scalar store if its stored value and address are uniform. Sometimes the legacy cost model is not powerful enough to determine whether the stored value is uniform, but VPlan analysis can. Also, in the VPlan transform stage, we can insert an extract-last-element (extract-last-active-element in the future) to extract the correct element to store. |
||||||||||||||||
| !WidenStoreR->isConsecutive() && | ||||||||||||||||
| isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe>(WidenStoreR)) { | ||||||||||||||||
|
||||||||||||||||
| auto *WidenStoreR = dyn_cast<VPWidenMemoryRecipe>(&R); | |
| if (WidenStoreR && vputils::isSingleScalar(WidenStoreR->getAddr()) && | |
| !WidenStoreR->isConsecutive() && | |
| isa<VPWidenStoreRecipe, VPWidenStoreEVLRecipe>(WidenStoreR)) { | |
| if (auto *WidenStoreR = dyn_cast<VPWidenStoreRecipe>(&R)) { | |
| if (WidenStoreR->isMasked() && vputils::isSingleScalar(WidenStoreR->getAddr()) && | |
| !WidenStoreR->isConsecutive()) { |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Updated, thanks!
Outdated
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
stray or
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Dropped, thanks!
Outdated
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think you need to extract the last element per part if the address is single-scalar, and use ExtractLastElement if the address is uniform-across-VF-and-UFs (see similar code below for other stores). This probably also needs additional test coverage.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Updated, thanks!
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,51 @@ | ||
| ; NOTE: Assertions have been autogenerated by utils/update_test_checks.py UTC_ARGS: --version 5 | ||
| ; RUN: opt -passes=loop-vectorize -force-vector-width=2 -mtriple=riscv64 -mattr=+v -S %s | FileCheck %s | ||
|
||
| define void @truncate_i16_to_i8_cse(ptr noalias %src, ptr noalias %dst) { | ||
| ; CHECK-LABEL: define void @truncate_i16_to_i8_cse( | ||
| ; CHECK-SAME: ptr noalias [[SRC:%.*]], ptr noalias [[DST:%.*]]) #[[ATTR0:[0-9]+]] { | ||
| ; CHECK-NEXT: [[ENTRY:.*:]] | ||
| ; CHECK-NEXT: br label %[[VECTOR_PH:.*]] | ||
| ; CHECK: [[VECTOR_PH]]: | ||
| ; CHECK-NEXT: br label %[[VECTOR_BODY:.*]] | ||
| ; CHECK: [[VECTOR_BODY]]: | ||
| ; CHECK-NEXT: [[INDEX:%.*]] = phi i64 [ 0, %[[VECTOR_PH]] ], [ [[INDEX_NEXT:%.*]], %[[VECTOR_BODY]] ] | ||
| ; CHECK-NEXT: [[TMP0:%.*]] = load i16, ptr [[SRC]], align 2 | ||
| ; CHECK-NEXT: [[BROADCAST_SPLATINSERT:%.*]] = insertelement <2 x i16> poison, i16 [[TMP0]], i64 0 | ||
| ; CHECK-NEXT: [[BROADCAST_SPLAT:%.*]] = shufflevector <2 x i16> [[BROADCAST_SPLATINSERT]], <2 x i16> poison, <2 x i32> zeroinitializer | ||
| ; CHECK-NEXT: [[TMP1:%.*]] = trunc <2 x i16> [[BROADCAST_SPLAT]] to <2 x i8> | ||
| ; CHECK-NEXT: [[TMP2:%.*]] = extractelement <2 x i8> [[TMP1]], i32 1 | ||
| ; CHECK-NEXT: store i8 [[TMP2]], ptr null, align 1 | ||
| ; CHECK-NEXT: store i8 [[TMP2]], ptr [[DST]], align 1 | ||
| ; CHECK-NEXT: [[INDEX_NEXT]] = add nuw i64 [[INDEX]], 2 | ||
| ; CHECK-NEXT: [[TMP3:%.*]] = icmp eq i64 [[INDEX_NEXT]], 4294967296 | ||
| ; CHECK-NEXT: br i1 [[TMP3]], label %[[MIDDLE_BLOCK:.*]], label %[[VECTOR_BODY]], !llvm.loop [[LOOP0:![0-9]+]] | ||
| ; CHECK: [[MIDDLE_BLOCK]]: | ||
| ; CHECK-NEXT: br label %[[EXIT:.*]] | ||
| ; CHECK: [[EXIT]]: | ||
| ; CHECK-NEXT: ret void | ||
| ; | ||
| entry: | ||
| br label %loop | ||
|
|
||
| loop: | ||
| %iv = phi i64 [ 0, %entry ], [ %iv.next, %loop ] | ||
| %count = phi i32 [ 0, %entry ], [ %count.next, %loop ] | ||
| %val = load i16, ptr %src, align 2 | ||
| %val.zext = zext i16 %val to i64 | ||
| %val.trunc.zext = trunc i64 %val.zext to i8 | ||
| store i8 %val.trunc.zext, ptr null, align 1 | ||
|
||
| %val.trunc = trunc i16 %val to i8 | ||
| store i8 %val.trunc, ptr %dst, align 1 | ||
| %count.next = add i32 %count, 1 | ||
| %exitcond = icmp eq i32 %count.next, 0 | ||
| %iv.next = add i64 %iv, 1 | ||
| br i1 %exitcond, label %exit, label %loop | ||
|
|
||
| exit: | ||
| ret void | ||
| } | ||
| ;. | ||
| ; CHECK: [[LOOP0]] = distinct !{[[LOOP0]], [[META1:![0-9]+]], [[META2:![0-9]+]]} | ||
| ; CHECK: [[META1]] = !{!"llvm.loop.isvectorized", i32 1} | ||
| ; CHECK: [[META2]] = !{!"llvm.loop.unroll.runtime.disable"} | ||
| ;. | ||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Updated, thanks!